@@ -0,0 +1,488 @@
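+# Plot results of the Troll Patrol simulation experiments: read each trial's
+# bridges.csv (plus its simulation_config.json, start.csv, and, for
+# experiment 2, troll_patrol_config.json), classify blocking detection per
+# bridge as a true/false positive/negative, and write figures and LaTeX
+# tables to results/.
+#
+# Arguments: experiment number (1 or 2), number of trials per setting,
+# followed by the bridges.csv files to analyze.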
+
+import matplotlib
+import matplotlib.pyplot as pyplot
+from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
+import math
+import csv
+import json
+import sys
+
+
+# Command-line arguments: experiment number and number of trials per setting.
+# The remaining arguments (sys.argv[3:]) are the bridges.csv files to analyze.
+experiment_num = int(sys.argv[1])
+num_trials = int(sys.argv[2])
+
+# Number of simulated days included in the analysis
+num_days = 500
+
+# A blockage only counts as detected if it is flagged within this many days
+max_number_of_days_to_detect = 10
+
+if experiment_num == 1:
+    matplotlib.rcParams.update({'font.size': 14})
+else:
+    matplotlib.rcParams.update({'font.size': 14})
+
+width = 7.2
+
+
+def mean(my_list):
+    """Return the arithmetic mean of a list, or None if the list is empty."""
+    if len(my_list) == 0:
+        return None
+
+    total = 0
+    for i in my_list:
+        total += i
+    return total / len(my_list)
+
+
+def std_dev(my_list):
+    """Return the population standard deviation of a list, or None if the list is empty."""
+    if len(my_list) == 0:
+        return None
+
+    avg = mean(my_list)
+    total = 0
+    for i in my_list:
+        total += (i - avg)**2
+    total /= len(my_list)
+    return math.sqrt(total)
+
+
+# Values of the independent variable for each experiment
+if experiment_num == 1:
+    # Probability that a user submits a report
+    ind_var = [0.0, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
+else:
+    # Harshness (max_threshold) of the Troll Patrol classifier
+    ind_var = [0, 1, 2, 3, 4]
+
+# Detection times (days from blocking to detection), per independent-variable value
+overt = [ [] for i in range(len(ind_var)) ]
+flooding = [ [] for i in range(len(ind_var)) ]
+
+# Per-trial classification counts, per independent-variable value
+overt_tp = [ [] for i in range(len(ind_var)) ]
+overt_tn = [ [] for i in range(len(ind_var)) ]
+overt_fp = [ [] for i in range(len(ind_var)) ]
+overt_fn = [ [] for i in range(len(ind_var)) ]
+
+flooding_tp = [ [] for i in range(len(ind_var)) ]
+flooding_tn = [ [] for i in range(len(ind_var)) ]
+flooding_fp = [ [] for i in range(len(ind_var)) ]
+flooding_fn = [ [] for i in range(len(ind_var)) ]
+
+
+# Process each per-trial bridges.csv file given on the command line
+for bfile in sys.argv[3:]:
+    with open(bfile,'r') as bcsv:
+        bridges = csv.reader(bcsv, delimiter=',')
+
+        # simulation_config.json gives the censor model; the independent
+        # variable comes from it (experiment 1) or from
+        # troll_patrol_config.json (experiment 2)
+        sfile = bfile[:-(len("bridges.csv"))] + "simulation_config.json"
+        with open(sfile,'r') as sjson:
+            config = json.load(sjson)
+            secrecy = config["censor_secrecy"]
+            if experiment_num == 1:
+                var = config["prob_user_submits_reports"]
+                index = ind_var.index(var)
+            else:
+                tfile = bfile[:-(len("bridges.csv"))] + "troll_patrol_config.json"
+                with open(tfile,'r') as tjson:
+                    tconfig = json.load(tjson)
+
+                    var = tconfig["max_threshold"]
+                    index = ind_var.index(var)
+
+        # First simulated date of this trial; only events up to
+        # start_date + num_days - 1 are counted
+        startfile = bfile[:-(len("bridges.csv"))] + "start.csv"
+        with open(startfile,'r') as startcsv:
+            start_dates = csv.reader(startcsv, delimiter=',')
+            start_row = next(start_dates)
+            start_date = int(start_row[0])
+            end_date = start_date + num_days - 1
+
+        detection_times = []
+
+        # Classification counts for this trial
+        true_pos = 0
+        true_neg = 0
+        false_pos = 0
+        false_neg = 0
+
+        # Classify each bridge in this trial
+        for row in bridges:
+            # Skip header rows
+            if row[0] == "Full stats per bridge:" or row[0] == "Fingerprint":
+                continue
+
+            first_distributed = int(row[1])
+            first_real_user = int(row[2])
+            first_blocked = int(row[3])
+            first_detected_blocked = int(row[4])
+
+            # Treat events after the analysis window as if they never happened
+            if first_distributed > end_date:
+                first_distributed = 0
+            if first_real_user > end_date:
+                first_real_user = 0
+            if first_blocked > end_date:
+                first_blocked = 0
+            if first_detected_blocked > end_date:
+                first_detected_blocked = 0
+
+            # Ignore bridges that never had a real user
+            if first_real_user == 0:
+                continue
+
+            # A blocked bridge counts as a true positive only if the blockage
+            # was detected within max_number_of_days_to_detect days; a
+            # detection with no blockage (or before it) is a false positive
+            if first_detected_blocked == 0:
+                if first_blocked == 0:
+                    true_neg += 1
+                else:
+                    false_neg += 1
+            else:
+                if first_blocked == 0 or first_detected_blocked < first_blocked:
+                    false_pos += 1
+                elif first_detected_blocked - first_blocked > max_number_of_days_to_detect:
+                    false_neg += 1
+                else:
+                    true_pos += 1
+                    detection_times.append(first_detected_blocked - first_blocked)
+
+        # Record this trial's results under its censor type
+        if secrecy == "Flooding":
+            flooding[index].extend(detection_times)
+            flooding_tp[index].append(true_pos)
+            flooding_tn[index].append(true_neg)
+            flooding_fp[index].append(false_pos)
+            flooding_fn[index].append(false_neg)
+        else:
+            overt[index].extend(detection_times)
+            overt_tp[index].append(true_pos)
+            overt_tn[index].append(true_neg)
+            overt_fp[index].append(false_pos)
+            overt_fn[index].append(false_neg)
+
+
+# Indices of ind_var for which we have data, per censor type
+ind_var_overt = []
+ind_var_flooding = []
+
+# Mean and standard deviation of precision and recall across trials,
+# index-aligned with ind_var
+overt_precision_means = []
+overt_precision_stddevs = []
+overt_recall_means = []
+overt_recall_stddevs = []
+
+for i in range(len(ind_var)):
+    precisions = []
+    recalls = []
+
+    if len(overt_tp[i]) > 0:
+        ind_var_overt.append(i)
+
+        for j in range(len(overt_tp[i])):
+            precisions.append(overt_tp[i][j] / (overt_tp[i][j] + overt_fp[i][j]))
+            recalls.append(overt_tp[i][j] / (overt_tp[i][j] + overt_fn[i][j]))
+
+    # Append even when there is no data (mean/std_dev return None) so these
+    # lists stay aligned with ind_var; such entries are never used below,
+    # since only indices in ind_var_overt are plotted or tabulated
+    overt_precision_means.append(mean(precisions))
+    overt_precision_stddevs.append(std_dev(precisions))
+    overt_recall_means.append(mean(recalls))
+    overt_recall_stddevs.append(std_dev(recalls))
+
+flooding_precision_means = []
+flooding_precision_stddevs = []
+flooding_recall_means = []
+flooding_recall_stddevs = []
+
+for i in range(len(ind_var)):
+    precisions = []
+    recalls = []
+
+    if len(flooding_tp[i]) > 0:
+        ind_var_flooding.append(i)
+
+        for j in range(len(flooding_tp[i])):
+            precisions.append(flooding_tp[i][j] / (flooding_tp[i][j] + flooding_fp[i][j]))
+            recalls.append(flooding_tp[i][j] / (flooding_tp[i][j] + flooding_fn[i][j]))
+
+    flooding_precision_means.append(mean(precisions))
+    flooding_precision_stddevs.append(std_dev(precisions))
+    flooding_recall_means.append(mean(recalls))
+    flooding_recall_stddevs.append(std_dev(recalls))
+
+
+# Violin plots of detection times for the overt censor
+if experiment_num == 1:
+    pyplot.violinplot([overt[i] for i in ind_var_overt], positions=[ind_var[i] for i in ind_var_overt], widths=0.04)
+    pyplot.title("Time to Detect Censorship (Overt Censor)")
+    pyplot.xlabel("Probability of users submitting reports")
+    pyplot.ylabel("Days to detect censorship")
+    pyplot.ylim(bottom=0)
+    pyplot.savefig("results/figure-2b.png")
+    pyplot.cla()
+
+else:
+    pyplot.violinplot([overt[i] for i in ind_var_overt], positions=[ind_var[i] for i in ind_var_overt])
+    pyplot.title("Time to Detect Censorship (Overt Censor)")
+    pyplot.xlabel("Harshness")
+    pyplot.xticks(ind_var)
+    pyplot.ylabel("Days to detect censorship")
+    pyplot.ylim(bottom=0)
+    pyplot.savefig("results/figure-3b.png")
+    pyplot.cla()
+
+
+# Violin plots of detection times for the flooding censor, drawn in orange
+if experiment_num == 1:
+    fv = pyplot.violinplot([flooding[i] for i in ind_var_flooding], positions=[ind_var[i] for i in ind_var_flooding], widths=0.045)
+else:
+    fv = pyplot.violinplot([flooding[i] for i in ind_var_flooding], positions=[ind_var[i] for i in ind_var_flooding])
+
+for pc in fv["bodies"]:
+    pc.set_facecolor("orange")
+    pc.set_edgecolor("orange")
+for part in ("cbars", "cmins", "cmaxes"):
+    fv[part].set_edgecolor("orange")
+
+if experiment_num == 1:
+    pyplot.title("Time to Detect Censorship (Flooding Censor)")
+    pyplot.xlabel("Probability of users submitting reports")
+    pyplot.ylabel("Days to detect censorship")
+    pyplot.ylim(bottom=0)
+    pyplot.savefig("results/figure-2c.png")
+    pyplot.cla()
+
+else:
+    pyplot.title("Time to Detect Censorship (Flooding Censor)")
+    pyplot.xlabel("Harshness")
+    pyplot.xticks(ind_var)
+    pyplot.ylabel("Days to detect censorship")
+    pyplot.ylim(bottom=0)
+    pyplot.savefig("results/figure-3c.png")
+    pyplot.cla()
+
+
+# Experiment 1: recall vs. probability of submitting reports (figure 2a).
+# Experiment 2: precision vs. recall with a zoomed inset (figure 3a).
+if experiment_num == 1:
+    pyplot.ylim(0,1)
+    ax = pyplot
+    ax.errorbar([ind_var[i] for i in ind_var_overt], [overt_recall_means[i] for i in ind_var_overt], [overt_recall_stddevs[i] for i in ind_var_overt], linestyle="solid", marker='o', capsize=3)
+    ax.errorbar([ind_var[i] for i in ind_var_flooding], [flooding_recall_means[i] for i in ind_var_flooding], [flooding_recall_stddevs[i] for i in ind_var_flooding], linestyle="dotted", marker='v', capsize=3)
+    pyplot.xlabel("Probability of users submitting reports")
+    pyplot.xlim(0,1)
+    pyplot.ylabel("Recall")
+    pyplot.ylim(0,1)
+    pyplot.title("Proportion of Blocked Bridges Detected")
+    pyplot.legend(["Overt censor", "Flooding censor"], loc = "lower right")
+    pyplot.savefig("results/figure-2a.png")
+    pyplot.cla()
+
+else:
+    pyplot.xlim(0,1)
+    pyplot.ylim(0,1.02)
+    ax = pyplot.axes()
+    ax.errorbar([overt_recall_means[i] for i in ind_var_overt], [overt_precision_means[i] for i in ind_var_overt], xerr=[overt_recall_stddevs[i] for i in ind_var_overt], yerr=[overt_precision_stddevs[i] for i in ind_var_overt], marker='o', capsize=3, linestyle="solid")
+    ax.errorbar([flooding_recall_means[i] for i in ind_var_flooding], [flooding_precision_means[i] for i in ind_var_flooding], xerr=[flooding_recall_stddevs[i] for i in ind_var_flooding], yerr=[flooding_precision_stddevs[i] for i in ind_var_flooding], marker='v', capsize=3, linestyle="dotted")
+    pyplot.xlabel("Recall")
+    pyplot.xlim(0,1)
+    pyplot.ylabel("Precision")
+    pyplot.ylim(0,1.02)
+    pyplot.title("Precision vs. Recall")
+    pyplot.legend(["Overt censor", "Flooding censor"], loc = "lower left")
+
+    # Zoomed inset showing the high-precision, high-recall region
+    axins = zoomed_inset_axes(ax, zoom=1.75, bbox_to_anchor=(-0.325, -0.125, 1, 1), bbox_transform=ax.transAxes)
+    axins.errorbar([overt_recall_means[i] for i in ind_var_overt], [overt_precision_means[i] for i in ind_var_overt], xerr=[overt_recall_stddevs[i] for i in ind_var_overt], yerr=[overt_precision_stddevs[i] for i in ind_var_overt], marker='o', capsize=3, linestyle="solid")
+    axins.errorbar([flooding_recall_means[i] for i in ind_var_flooding], [flooding_precision_means[i] for i in ind_var_flooding], xerr=[flooding_recall_stddevs[i] for i in ind_var_flooding], yerr=[flooding_precision_stddevs[i] for i in ind_var_flooding], marker='v', capsize=3, linestyle="dotted")
+    pyplot.xlim(0.75,1)
+    pyplot.ylim(0.7,1.02)
+    mark_inset(ax, axins, loc1=2, loc2=4)
+    pyplot.savefig("results/figure-3a.png")
+    pyplot.cla()
+
+
+def fmt(data, multiple_trials=True):
+    """Format a list of per-trial counts as mean and standard deviation
+    (rounded to the standard deviation's leading digit) for the LaTeX tables."""
+    if not multiple_trials:
+        return f"{data[0]}"
+
+    m = mean(data)
+    s = std_dev(data)
+
+    if s == 0:
+        return f"{round(m)}$\\pm$0"
+
+    # Round both values to the standard deviation's most significant digit
+    n = 10000
+    while round(s / n) < 1:
+        n /= 10
+    s = round(s / n) * n
+    m = round(m / n) * n
+
+    if s >= 1:
+        s = int(round(s))
+    elif s >= 0.1:
+        s = int(round(s*10)) / 10
+
+    if m >= 1:
+        m = int(round(m))
+    elif m >= 0.1:
+        m = int(round(m*10)) / 10
+
+    return f"{m}$\\pm${s}"
+
+def fmt_pr(m, s, multiple_trials=True):
+    """Format a precision or recall mean and standard deviation for the LaTeX tables."""
+    if not multiple_trials:
+        m = int(round(m*1000)) / 1000
+        return f"{m}"
+
+    # Round both values to the standard deviation's most significant digit
+    n = 1.0
+    while s > 0 and round(s / n) < 1:
+        n /= 10
+    s = round(s / n) * n
+    m = round(m / n) * n
+
+    if s >= 0.1:
+        s = int(round(s*10)) / 10
+        m = int(round(m*10)) / 10
+    elif s >= 0.01:
+        s = int(round(s*100)) / 100
+        m = int(round(m*100)) / 100
+    elif s >= 0.001:
+        s = int(round(s*1000)) / 1000
+        m = int(round(m*1000)) / 1000
+    elif s >= 0.0001:
+        s = int(round(s*10000)) / 10000
+        m = int(round(m*10000)) / 10000
+    elif s >= 0.00001:
+        s = int(round(s*100000)) / 100000
+        m = int(round(m*100000)) / 100000
+    elif s >= 0.000001:
+        s = int(round(s*1000000)) / 1000000
+        m = int(round(m*1000000)) / 1000000
+
+    return f"{m}$\\pm${s}"
+
+
+# Preamble for the standalone LaTeX documents containing the result tables
+standalone_table_preamble = """\\documentclass{article}
+\\usepackage{standalone}
+\\usepackage{array}
+\\newcolumntype{C}[1]{>{\\centering\\arraybackslash}p{#1}}
+\\begin{document}"""
+
+
+# Write the LaTeX tables of classification results
+if experiment_num == 1:
+    ind_var_str = "Prob. users submit reports"
+
+    with open("results/experiment-1-table-overt.tex", 'w') as f:
+        print(standalone_table_preamble, file=f)
+        print("""\\begin{table*}
+\\caption[Results of experiment 1 with overt censor]{Results of the first experiment with the \\textbf{overt censor}, specifically the mean and standard deviation of the number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the probability of users submitting reports.}
+\\label{experiment-1-results-overt}
+\\centering
+\\begin{tabular}[p]{|C{0.1\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
+        print("\\hline", file=f)
+        print("\\textbf{" + ind_var_str + "} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
+        print("\\hline", file=f)
+        print("\\hline", file=f)
+        for i in ind_var_overt:
+            print(f"{ind_var[i]} & {fmt(overt_tp[i], num_trials>1)} & {fmt(overt_tn[i], num_trials>1)} & {fmt(overt_fp[i], num_trials>1)} & {fmt(overt_fn[i], num_trials>1)} & {fmt_pr(overt_precision_means[i], overt_precision_stddevs[i], num_trials>1)} & {fmt_pr(overt_recall_means[i], overt_recall_stddevs[i], num_trials>1)}\\\\", file=f)
+        print("\\hline", file=f)
+        print("\\end{tabular}", file=f)
+        print("\\end{table*}", file=f)
+        print("\\end{document}", file=f)
+
+    with open("results/experiment-1-table-flooding.tex", 'w') as f:
+        print(standalone_table_preamble, file=f)
+        print("""\\begin{table*}
+\\caption[Results of experiment 1 with flooding censor]{Results of the first experiment with the \\textbf{flooding censor}, specifically the mean and standard deviation of the number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the probability of users submitting reports. When Troll Patrol does not detect that bridges are blocked, Lox does not allow users to migrate to new bridges, so the overall number of bridges in the simulation does not grow. This accounts for the low overall number of bridges when the number of positive classifications (both true and false) is low.}
+\\label{experiment-1-results-flooding}
+\\centering
+\\begin{tabular}[p]{|C{0.1\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
+        print("\\hline", file=f)
+        print("\\textbf{" + ind_var_str + "} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
+        print("\\hline", file=f)
+        print("\\hline", file=f)
+        for i in ind_var_flooding:
+            print(f"{ind_var[i]} & {fmt(flooding_tp[i], num_trials>1)} & {fmt(flooding_tn[i], num_trials>1)} & {fmt(flooding_fp[i], num_trials>1)} & {fmt(flooding_fn[i], num_trials>1)} & {fmt_pr(flooding_precision_means[i], flooding_precision_stddevs[i], num_trials>1)} & {fmt_pr(flooding_recall_means[i], flooding_recall_stddevs[i], num_trials>1)} \\\\", file=f)
+        print("\\hline", file=f)
+        print("\\end{tabular}", file=f)
+        print("\\end{table*}", file=f)
+        print("\\end{document}", file=f)
+else:
+    with open("results/experiment-2-table-overt.tex", 'w') as f:
+        print(standalone_table_preamble, file=f)
+        print("""\\begin{table*}
+\\caption[Results of experiment 2 with overt censor]{Results of the second experiment with the \\textbf{overt censor}, specifically the mean and standard deviation of the number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the harshness of the classifier.}
+\\label{experiment-2-results-overt}
+\\centering
+\\begin{tabular}[t]{|C{0.115\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
+        print("\\hline", file=f)
+        print("\\textbf{Harshness} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
+        print("\\hline", file=f)
+        print("\\hline", file=f)
+        for i in ind_var_overt:
+            print(f"{ind_var[i]} & {fmt(overt_tp[i], num_trials>1)} & {fmt(overt_tn[i], num_trials>1)} & {fmt(overt_fp[i], num_trials>1)} & {fmt(overt_fn[i], num_trials>1)} & {fmt_pr(overt_precision_means[i], overt_precision_stddevs[i], num_trials>1)} & {fmt_pr(overt_recall_means[i], overt_recall_stddevs[i], num_trials>1)}\\\\", file=f)
+        print("\\hline", file=f)
+        print("\\end{tabular}", file=f)
+        print("\\end{table*}", file=f)
+        print("\\end{document}", file=f)
+
+    with open("results/experiment-2-table-flooding.tex", 'w') as f:
+        print(standalone_table_preamble, file=f)
+        print("""\\begin{table*}
+\\caption[Results of experiment 2 with flooding censor]{Results of the second experiment with the \\textbf{flooding censor}, specifically the mean and standard deviation of the number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the harshness of the classifier.}
+\\label{experiment-2-results-flooding}
+\\centering
+\\begin{tabular}[t]{|C{0.115\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
+        print("\\hline", file=f)
+        print("\\textbf{Harshness} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
+        print("\\hline", file=f)
+        print("\\hline", file=f)
+        for i in ind_var_flooding:
+            print(f"{ind_var[i]} & {fmt(flooding_tp[i], num_trials>1)} & {fmt(flooding_tn[i], num_trials>1)} & {fmt(flooding_fp[i], num_trials>1)} & {fmt(flooding_fn[i], num_trials>1)} & {fmt_pr(flooding_precision_means[i], flooding_precision_stddevs[i], num_trials>1)} & {fmt_pr(flooding_recall_means[i], flooding_recall_stddevs[i], num_trials>1)} \\\\", file=f)
+        print("\\hline", file=f)
+        print("\\end{tabular}", file=f)
+        print("\\end{table*}", file=f)
+        print("\\end{document}", file=f)