# plot-results.py
# (reconstructed from a web-viewer paste; the "NN KB" header and the
# concatenated line-number gutter that preceded the code were removed)
  1. #!/usr/bin/env python3
  2. import matplotlib
  3. import matplotlib.pyplot as pyplot
  4. from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes,mark_inset
  5. import math
  6. import csv
  7. import json
  8. import sys
  9. # Pass experiment number as first arg
  10. experiment_num = int(sys.argv[1])
  11. # Pass number of trials as second arg (used to average results)
  12. num_trials = int(sys.argv[2])
  13. # (Pass list of *-bridge.csv files as remaining args)
  14. # Artificially truncate to this many days if we ran for longer
  15. num_days = 500
  16. # Max number of days for Troll Patrol to detect censorship. If it
  17. # doesn't detect it within this time, we count it as a false negative.
  18. max_number_of_days_to_detect = 10
  19. # Use bigger font size
  20. if experiment_num == 1:
  21. matplotlib.rcParams.update({'font.size': 14})
  22. else:
  23. matplotlib.rcParams.update({'font.size': 14})
  24. # Adjust width of experiment 1 figures
  25. width = 7.2
  26. # Get mean of list of numbers
  27. def mean(my_list):
  28. if len(my_list) == 0:
  29. return None
  30. sum = 0
  31. for i in my_list:
  32. sum += i
  33. return sum / len(my_list)
  34. # Get stddev of list of numbers
  35. def std_dev(my_list):
  36. if len(my_list) == 0:
  37. return None
  38. avg = mean(my_list)
  39. sum = 0
  40. for i in my_list:
  41. sum += (i - avg)**2
  42. sum /= len(my_list)
  43. return math.sqrt(sum)
  44. # Independent variable
  45. if experiment_num == 1:
  46. # Probability user submits reports
  47. # (note flooding does not use 0, so plot from index 1)
  48. ind_var = [0.0, 0.01, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
  49. else:
  50. # Harshness
  51. ind_var = [0, 1, 2, 3, 4]
  52. # Raw detection times for violin plots
  53. overt = [ [] for i in range(len(ind_var)) ]
  54. flooding = [ [] for i in range(len(ind_var)) ]
  55. # Get {True,False} {Positives,Negatives} for our trials
  56. overt_tp = [ [] for i in range(len(ind_var)) ]
  57. overt_tn = [ [] for i in range(len(ind_var)) ]
  58. overt_fp = [ [] for i in range(len(ind_var)) ]
  59. overt_fn = [ [] for i in range(len(ind_var)) ]
  60. flooding_tp = [ [] for i in range(len(ind_var)) ]
  61. flooding_tn = [ [] for i in range(len(ind_var)) ]
  62. flooding_fp = [ [] for i in range(len(ind_var)) ]
  63. flooding_fn = [ [] for i in range(len(ind_var)) ]
  64. # Remaining arguments should be *-bridges.csv files containing info on bridges
  65. for bfile in sys.argv[3:]:
  66. with open(bfile,'r') as bcsv:
  67. # Read data on bridges from CSV
  68. bridges = csv.reader(bcsv, delimiter=',')
  69. # Get censor_secrecy and ind_var from simulation config
  70. sfile = bfile[:-(len("bridges.csv"))] + "simulation_config.json"
  71. with open(sfile,'r') as sjson:
  72. config = json.load(sjson)
  73. secrecy = config["censor_secrecy"]
  74. if experiment_num == 1:
  75. var = config["prob_user_submits_reports"]
  76. index = ind_var.index(var)
  77. else:
  78. tfile = bfile[:-(len("bridges.csv"))] + "troll_patrol_config.json"
  79. with open(tfile,'r') as tjson:
  80. tconfig = json.load(tjson)
  81. # max_threshold used as harshness
  82. var = tconfig["max_threshold"]
  83. index = ind_var.index(var)
  84. # Get start date so we can ignore events after 500 days
  85. startfile = bfile[:-(len("bridges.csv"))] + "start.csv"
  86. with open(startfile,'r') as startcsv:
  87. start_dates = csv.reader(startcsv, delimiter=',')
  88. start_row = next(start_dates)
  89. start_date = int(start_row[0])
  90. end_date = start_date + num_days - 1
  91. # Raw detection times for violin plot
  92. detection_times = []
  93. # {True,False} {Positives,Negatives}
  94. true_pos = 0
  95. true_neg = 0
  96. false_pos = 0
  97. false_neg = 0
  98. for row in bridges:
  99. if row[0] == "Full stats per bridge:" or row[0] == "Fingerprint":
  100. continue
  101. # row[0] is the bridge fingerprint
  102. first_distributed = int(row[1])
  103. first_real_user = int(row[2])
  104. first_blocked = int(row[3])
  105. first_detected_blocked = int(row[4])
  106. # row[5] is first positive report
  107. # Treat anything after the end date like it didn't happen
  108. if first_distributed > end_date:
  109. first_distributed = 0
  110. if first_real_user > end_date:
  111. first_real_user = 0
  112. if first_blocked > end_date:
  113. first_blocked = 0
  114. if first_detected_blocked > end_date:
  115. first_detected_blocked = 0
  116. # Ignore bridges with no users
  117. if first_real_user == 0:
  118. continue
  119. # Did we identify correctly?
  120. # Negative classification
  121. if first_detected_blocked == 0:
  122. if first_blocked == 0:
  123. true_neg += 1
  124. else:
  125. false_neg += 1
  126. # Positive classification
  127. else:
  128. if first_blocked == 0 or first_detected_blocked < first_blocked:
  129. false_pos += 1
  130. # If we didn't detect it in time, consider it a false
  131. # negative, even if we eventually detected it
  132. elif first_detected_blocked - first_blocked > max_number_of_days_to_detect:
  133. false_neg += 1
  134. else:
  135. true_pos += 1
  136. # Add data point to plot in violin plot
  137. detection_times.append(first_detected_blocked - first_blocked)
  138. if secrecy == "Flooding":
  139. # Add raw data for violin plot
  140. flooding[index].extend(detection_times)
  141. flooding_tp[index].append(true_pos)
  142. flooding_tn[index].append(true_neg)
  143. flooding_fp[index].append(false_pos)
  144. flooding_fn[index].append(false_neg)
  145. else:
  146. # Add raw data for violin plot
  147. overt[index].extend(detection_times)
  148. overt_tp[index].append(true_pos)
  149. overt_tn[index].append(true_neg)
  150. overt_fp[index].append(false_pos)
  151. overt_fn[index].append(false_neg)
  152. # We may not have results for all values of the independent variable. If
  153. # we have a smaller set of values, track them.
  154. ind_var_overt = []
  155. ind_var_flooding = []
  156. # Get precision and recall for each trial
  157. overt_precision_means = []
  158. overt_precision_stddevs = []
  159. overt_recall_means = []
  160. overt_recall_stddevs = []
  161. # Get mean and stddev precision and recall
  162. for i in range(len(ind_var)):
  163. precisions = []
  164. recalls = []
  165. # If we have data, add its index to the list
  166. if len(overt_tp[i]) > 0:
  167. ind_var_overt.append(i)
  168. # Compute precision and recall for each trial
  169. for j in range(len(overt_tp[i])):
  170. precisions.append(overt_tp[i][j] / (overt_tp[i][j] + overt_fp[i][j]))
  171. recalls.append(overt_tp[i][j] / (overt_tp[i][j] + overt_fn[i][j]))
  172. # Add their means and stddevs to the appropriate lists
  173. overt_precision_means.append(mean(precisions))
  174. overt_precision_stddevs.append(std_dev(precisions))
  175. overt_recall_means.append(mean(recalls))
  176. overt_recall_stddevs.append(std_dev(recalls))
  177. flooding_precision_means = []
  178. flooding_precision_stddevs = []
  179. flooding_recall_means = []
  180. flooding_recall_stddevs = []
  181. # Get mean and stddev precision and recall
  182. for i in range(len(ind_var)):
  183. precisions = []
  184. recalls = []
  185. # If we have data, add its index to the list
  186. if len(flooding_tp[i]) > 0:
  187. ind_var_flooding.append(i)
  188. # Compute precision and recall for each trial
  189. for j in range(len(flooding_tp[i])):
  190. precisions.append(flooding_tp[i][j] / (flooding_tp[i][j] + flooding_fp[i][j]))
  191. recalls.append(flooding_tp[i][j] / (flooding_tp[i][j] + flooding_fn[i][j]))
  192. # Add their means and stddevs to the appropriate lists
  193. flooding_precision_means.append(mean(precisions))
  194. flooding_precision_stddevs.append(std_dev(precisions))
  195. flooding_recall_means.append(mean(recalls))
  196. flooding_recall_stddevs.append(std_dev(recalls))
  197. # Plot our data
  198. # Violin plots
  199. # Overt censor
  200. if experiment_num == 1:
  201. pyplot.violinplot([overt[i] for i in ind_var_overt], positions=[ind_var[i] for i in ind_var_overt], widths=0.04)
  202. pyplot.title("Time to Detect Censorship (Overt Censor)")
  203. pyplot.xlabel("Probability of users submitting reports")
  204. pyplot.ylabel("Days to detect censorship")
  205. pyplot.ylim(bottom=0)
  206. pyplot.savefig("results/figure-2b.png")
  207. pyplot.cla()
  208. else:
  209. pyplot.violinplot([overt[i] for i in ind_var_overt], positions=[ind_var[i] for i in ind_var_overt])
  210. pyplot.title("Time to Detect Censorship (Overt Censor)")
  211. pyplot.xlabel("Harshness")
  212. pyplot.xticks(ind_var)
  213. pyplot.ylabel("Days to detect censorship")
  214. pyplot.ylim(bottom=0)
  215. pyplot.savefig("results/figure-3b.png")
  216. pyplot.cla()
  217. # Flooding censor (should be orange)
  218. if experiment_num == 1:
  219. #pyplot.figure().set_figwidth(width)
  220. fv = pyplot.violinplot([flooding[i] for i in ind_var_flooding], positions=[ind_var[i] for i in ind_var_flooding], widths=0.045)
  221. else:
  222. fv = pyplot.violinplot([flooding[i] for i in ind_var_flooding], positions=[ind_var[i] for i in ind_var_flooding])
  223. # Make it orange regardless of experiment number
  224. for pc in fv["bodies"]:
  225. pc.set_facecolor("orange")
  226. pc.set_edgecolor("orange")
  227. for part in ("cbars", "cmins", "cmaxes"):
  228. fv[part].set_edgecolor("orange")
  229. if experiment_num == 1:
  230. pyplot.title("Time to Detect Censorship (Flooding Censor)")
  231. pyplot.xlabel("Probability of users submitting reports")
  232. pyplot.ylabel("Days to detect censorship")
  233. pyplot.ylim(bottom=0)
  234. pyplot.savefig("results/figure-2c.png")
  235. pyplot.cla()
  236. else:
  237. pyplot.title("Time to Detect Censorship (Flooding Censor)")
  238. pyplot.xlabel("Harshness")
  239. pyplot.xticks(ind_var)
  240. pyplot.ylabel("Days to detect censorship")
  241. pyplot.ylim(bottom=0)
  242. pyplot.savefig("results/figure-3c.png")
  243. pyplot.cla()
  244. # Precision vs. Recall
  245. if experiment_num == 1:
  246. # Also plot recall alone
  247. pyplot.ylim(0,1)
  248. ax = pyplot
  249. ax.errorbar([ind_var[i] for i in ind_var_overt], [overt_recall_means[i] for i in ind_var_overt], [overt_recall_stddevs[i] for i in ind_var_overt], linestyle="solid", marker='o', capsize=3)
  250. ax.errorbar([ind_var[i] for i in ind_var_flooding], [flooding_recall_means[i] for i in ind_var_flooding], [flooding_recall_stddevs[i] for i in ind_var_flooding], linestyle="dotted", marker='v', capsize=3)
  251. pyplot.xlabel("Probability of users submitting reports")
  252. pyplot.xlim(0,1)
  253. pyplot.ylabel("Recall")
  254. pyplot.ylim(0,1)
  255. pyplot.title("Proportion of Blocked Bridges Detected")
  256. pyplot.legend(["Overt censor", "Flooding censor"], loc = "lower right")
  257. pyplot.savefig("results/figure-2a.png")
  258. pyplot.cla()
  259. else:
  260. pyplot.xlim(0,1)
  261. pyplot.ylim(0,1.02)
  262. ax = pyplot.axes()
  263. ax.errorbar([overt_recall_means[i] for i in ind_var_overt], [overt_precision_means[i] for i in ind_var_overt], xerr=[overt_recall_stddevs[i] for i in ind_var_overt], yerr=[overt_precision_stddevs[i] for i in ind_var_overt], marker='o', capsize=3, linestyle="solid")
  264. ax.errorbar([flooding_recall_means[i] for i in ind_var_flooding], [flooding_precision_means[i] for i in ind_var_flooding], xerr=[flooding_recall_stddevs[i] for i in ind_var_flooding], yerr=[flooding_precision_stddevs[i] for i in ind_var_flooding], marker='v', capsize=3, linestyle="dotted")
  265. pyplot.xlabel("Recall")
  266. pyplot.xlim(0,1)
  267. pyplot.ylabel("Precision")
  268. pyplot.ylim(0,1.02)
  269. pyplot.title("Precision vs. Recall")
  270. pyplot.legend(["Overt censor", "Flooding censor"], loc = "lower left")
  271. # Zoom in on relevant part
  272. axins = zoomed_inset_axes(ax, zoom=1.75, bbox_to_anchor=(-0.325, -0.125, 1, 1), bbox_transform=ax.transAxes)
  273. axins.errorbar([overt_recall_means[i] for i in ind_var_overt], [overt_precision_means[i] for i in ind_var_overt], xerr=[overt_recall_stddevs[i] for i in ind_var_overt], yerr=[overt_precision_stddevs[i] for i in ind_var_overt], marker='o', capsize=3, linestyle="solid")
  274. axins.errorbar([flooding_recall_means[i] for i in ind_var_flooding], [flooding_precision_means[i] for i in ind_var_flooding], xerr=[flooding_recall_stddevs[i] for i in ind_var_flooding], yerr=[flooding_precision_stddevs[i] for i in ind_var_flooding], marker='v', capsize=3, linestyle="dotted")
  275. pyplot.xlim(0.75,1)
  276. pyplot.ylim(0.7,1.02)
  277. mark_inset(ax, axins, loc1=2, loc2=4)
  278. pyplot.savefig("results/figure-3a.png")
  279. pyplot.cla()
  280. # Format mean +- standard deviation with correct sigfigs and rounding.
  281. # I couldn't find an existing solution for this, so here's my awkward approach.
  282. def fmt(data, multiple_trials=True):
  283. # If we only run one trial, just use the count without standard deviation
  284. if not multiple_trials:
  285. return f"{data[0]}"
  286. # Get mean and standard deviation
  287. m = mean(data)
  288. s = std_dev(data)
  289. if s == 0:
  290. return f"{round(m)}$\\pm$0"
  291. # We have max 3600 bridges, so we will certainly never see this many.
  292. n = 10000
  293. while round(s / n) < 1:
  294. n /= 10
  295. s = round(s / n) * n
  296. m = round(m / n) * n
  297. if s >= 1:
  298. s = int(round(s))
  299. elif s >= 0.1:
  300. s = int(round(s*10)) / 10
  301. # We have a pesky 0.6000000...1 that causes problems. This is to handle that.
  302. if m >= 1:
  303. m = int(round(m))
  304. elif m >= 0.1:
  305. m = int(round(m*10)) / 10
  306. return f"{m}$\\pm${s}"
  307. def fmt_pr(m, s, multiple_trials=True):
  308. # If we only run one trial, round to 3 decimal places and don't
  309. # include standard deviations
  310. if not multiple_trials:
  311. m = int(round(m*1000)) / 1000
  312. return f"{m}"
  313. n = 1.0
  314. while s > 0 and round(s / n) < 1:
  315. n /= 10
  316. s = round(s / n) * n
  317. m = round(m / n) * n
  318. if s >= 0.1:
  319. s = int(round(s*10)) / 10
  320. m = int(round(m*10)) / 10
  321. elif s >= 0.01:
  322. s = int(round(s*100)) / 100
  323. m = int(round(m*100)) / 100
  324. elif s >= 0.001:
  325. s = int(round(s*1000)) / 1000
  326. m = int(round(m*1000)) / 1000
  327. elif s >= 0.0001:
  328. s = int(round(s*10000)) / 10000
  329. m = int(round(m*10000)) / 10000
  330. elif s >= 0.00001:
  331. s = int(round(s*100000)) / 100000
  332. m = int(round(m*100000)) / 100000
  333. elif s >= 0.000001:
  334. s = int(round(s*1000000)) / 1000000
  335. m = int(round(m*1000000)) / 1000000
  336. return f"{m}$\\pm${s}"
  337. # Output raw data as lines of table
  338. standalone_table_preamble = """\\documentclass{article}
  339. \\usepackage{standalone}
  340. \\usepackage{array}
  341. \\newcolumntype{C}[1]{>{\\centering\\arraybackslash}p{#1}}
  342. \\begin{document}"""
  343. # Use appropriate variables for this experiment
  344. if experiment_num == 1:
  345. ind_var_str = "Prob. users submit reports"
  346. # Make 2 different tables, one for overt censor and one for flooding censor
  347. with open("results/experiment-1-table-overt.tex", 'w') as f:
  348. print(standalone_table_preamble, file=f)
  349. print("""\\begin{table*}
  350. \\caption[Results of experiment 1 with overt censor]{Results of the first experiment with the \\textbf{overt censor}, specifically the mean and standard deviation number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the probability of users submitting reports.}
  351. \\label{experiment-1-results-overt}
  352. \\centering
  353. \\begin{tabular}[p]{|C{0.1\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
  354. print("\\hline", file=f)
  355. print("\\textbf{" + ind_var_str + "} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
  356. print("\\hline", file=f)
  357. print("\\hline", file=f)
  358. for i in ind_var_overt:
  359. print(f"{ind_var[i]} & {fmt(overt_tp[i], num_trials>1)} & {fmt(overt_tn[i], num_trials>1)} & {fmt(overt_fp[i], num_trials>1)} & {fmt(overt_fn[i], num_trials>1)} & {fmt_pr(overt_precision_means[i], overt_precision_stddevs[i], num_trials>1)} & {fmt_pr(overt_recall_means[i], overt_recall_stddevs[i], num_trials>1)}\\\\", file=f)
  360. print("\\hline", file=f)
  361. print("\\end{tabular}", file=f)
  362. print("\\end{table*}", file=f)
  363. print("\\end{document}", file=f)
  364. with open("results/experiment-1-table-flooding.tex", 'w') as f:
  365. print(standalone_table_preamble, file=f)
  366. print("""\\begin{table*}
  367. \\caption[Results of experiment 1 with flooding censor]{Results of the first experiment with the \\textbf{flooding censor}, specifically the mean and standard deviation number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the probability of users submitting reports. When Troll Patrol does not detect that bridges are blocked, Lox does not allow users to migrate to new bridges, so the number of overall bridges in the simulation does not grow. This accounts for the low number of overall bridges when the number of positive classifications (both true and false) is low.}
  368. \\label{experiment-1-results-flooding}
  369. \\centering
  370. \\begin{tabular}[p]{|C{0.1\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
  371. print("\\hline", file=f)
  372. print("\\textbf{" + ind_var_str + "} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
  373. print("\\hline", file=f)
  374. print("\\hline", file=f)
  375. for i in ind_var_flooding:
  376. print(f"{ind_var[i]} & {fmt(flooding_tp[i], num_trials>1)} & {fmt(flooding_tn[i], num_trials>1)} & {fmt(flooding_fp[i], num_trials>1)} & {fmt(flooding_fn[i], num_trials>1)} & {fmt_pr(flooding_precision_means[i], flooding_precision_stddevs[i], num_trials>1)} & {fmt_pr(flooding_recall_means[i], flooding_recall_stddevs[i], num_trials>1)} \\\\", file=f)
  377. print("\\hline", file=f)
  378. print("\\end{tabular}", file=f)
  379. print("\\end{table*}", file=f)
  380. print("\\end{document}", file=f)
  381. else:
  382. # Make 2 tables for experiment 2
  383. with open("results/experiment-2-table-overt.tex", 'w') as f:
  384. print(standalone_table_preamble, file=f)
  385. print("""\\begin{table*}
  386. \\caption[Results of experiment 2 with overt censor]{Results of the second experiment with the \\textbf{overt censor}, specifically the mean and standard deviation number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the harshness of the classifier.}
  387. \\label{experiment-2-results-overt}
  388. \\centering
  389. \\begin{tabular}[t]{|C{0.115\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
  390. print("\\hline", file=f)
  391. print("\\textbf{Harshness} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
  392. print("\\hline", file=f)
  393. print("\\hline", file=f)
  394. for i in ind_var_overt:
  395. print(f"{ind_var[i]} & {fmt(overt_tp[i], num_trials>1)} & {fmt(overt_tn[i], num_trials>1)} & {fmt(overt_fp[i], num_trials>1)} & {fmt(overt_fn[i], num_trials>1)} & {fmt_pr(overt_precision_means[i], overt_precision_stddevs[i], num_trials>1)} & {fmt_pr(overt_recall_means[i], overt_recall_stddevs[i], num_trials>1)}\\\\", file=f)
  396. print("\\hline", file=f)
  397. print("\\end{tabular}", file=f)
  398. print("\\end{table*}", file=f)
  399. print("\\end{document}", file=f)
  400. with open("results/experiment-2-table-flooding.tex", 'w') as f:
  401. print(standalone_table_preamble, file=f)
  402. print("""\\begin{table*}
  403. \\caption[Results of experiment 2 with flooding censor]{Results of the second experiment with the \\textbf{flooding censor}, specifically the mean and standard deviation number of true positives, true negatives, false positives, and false negatives for each set of trials. The independent variable in this experiment is the harshness of the classifier.}
  404. \\label{experiment-2-results-flooding}
  405. \\centering
  406. \\begin{tabular}[t]{|C{0.115\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|C{0.1\\textwidth}|C{0.105\\textwidth}|c|c|}""", file=f)
  407. print("\\hline", file=f)
  408. print("\\textbf{Harshness} & \\textbf{True positives} & \\textbf{True negatives} & \\textbf{False positives} & \\textbf{False negatives} & \\textbf{Precision} & \\textbf{Recall} \\\\", file=f)
  409. print("\\hline", file=f)
  410. print("\\hline", file=f)
  411. for i in ind_var_flooding:
  412. print(f"{ind_var[i]} & {fmt(flooding_tp[i], num_trials>1)} & {fmt(flooding_tn[i], num_trials>1)} & {fmt(flooding_fp[i], num_trials>1)} & {fmt(flooding_fn[i], num_trials>1)} & {fmt_pr(flooding_precision_means[i], flooding_precision_stddevs[i], num_trials>1)} & {fmt_pr(flooding_recall_means[i], flooding_recall_stddevs[i], num_trials>1)} \\\\", file=f)
  413. print("\\hline", file=f)
  414. print("\\end{tabular}", file=f)
  415. print("\\end{table*}", file=f)
  416. print("\\end{document}", file=f)