{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Benchmark"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "from sys import argv\n",
    "rootdir = argv[1]\n",
    "\n",
    "#############################\n",
    "#      FOR NOTEBOOK USE     #\n",
    "#     SET DIRECTORY HERE    #\n",
    "#                           #\n",
    "#rootdir = \"shmem_plot\"\n",
    "#                           #\n",
    "#############################\n",
    "\n",
    "print(\"Using root directory: {}\".format(rootdir))\n",
    "\n",
    "# Get the subdirs with the different tests\n",
    "subdirs = sorted([ name for name in os.listdir('{}'.format(rootdir)) if os.path.isdir(os.path.join('{}'.format(rootdir), name)) ])\n",
    "print(\"Available subdirs: {}\".format(subdirs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "from sys import exit\n",
    "\n",
    "with open(\"{}/settings.json\".format(rootdir)) as json_file:\n",
    "    settings = json.load(json_file)\n",
    "\n",
    "print(\"Succesfully loaded JSON file\")"
   ]
  },
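  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The plotting cells below read a number of keys from `settings.json`. The following sketch shows a hypothetical, minimal structure with made-up placeholder values (labels, colors, ticks, and dimensions are assumptions for illustration only), so that the accesses further down have something concrete to refer to:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "\n",
    "# Hypothetical sketch of the structure this notebook expects in settings.json,\n",
    "# inferred from the keys accessed below. All values are made-up placeholders.\n",
    "example_settings = {\n",
    "    \"histograms\": {\n",
    "        \"enabled\": True,\n",
    "        \"labels\": [\"latency\"],                               # one label per plotted data set\n",
    "        \"axis_labels\": {\"x\": \"latency [µs]\", \"y\": \"count\"},\n",
    "        \"dimensions\": {\"legend\": [2, 0.5]},                  # figsize of the exported legend\n",
    "        \"legend_columns\": 1\n",
    "    },\n",
    "    \"median_plot\": {\n",
    "        \"enabled\": True,\n",
    "        \"labels\": [\"group A\", \"group B\"],                    # one entry per subdirectory\n",
    "        \"colors\": [\"#00549f\", \"#57ab27\"],                    # one entry per subdirectory\n",
    "        \"print_missed_steps\": True,\n",
    "        \"log_scale\": False,\n",
    "        \"ticks\": {\"x\": [\"1KHz\", \"10KHz\"], \"y\": [0, 10, 20, 30]},\n",
    "        \"axis_labels\": {\"x\": \"rate\", \"y\": \"median latency [µs]\"},\n",
    "        \"dimensions\": {\"legend\": [4, 0.5]},\n",
    "        \"legend_columns\": 2\n",
    "    },\n",
    "    \"3d_plot\": {\n",
    "        \"enabled\": False,\n",
    "        \"ticks\": {\"x\": [\"1KHz\", \"10KHz\"], \"y\": [\"8\", \"64\"], \"z\": [0, 10, 20]},\n",
    "        \"axis_labels\": {\"x\": \"rate\", \"y\": \"values per sample\", \"z\": \"median latency [µs]\"},\n",
    "        \"dimensions\": {\"legend\": [4, 0.5]},\n",
    "        \"legend_columns\": 2\n",
    "    }\n",
    "}\n",
    "\n",
    "print(json.dumps(example_settings, indent=2))"
   ]
  },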
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load all files\n",
    "A group that will be one line in the summarizing graph is a *node-type* + *mode* combination. This group contains the variable *rate*. See the following three groups as example:\n",
    "\n",
    "* InfiniBand (RC): 1KHz, 10KHz, 50KHz, 100KHz\n",
    "* InfiniBand (UD): 1KHz, 10KHz, 50KHz, 100KHz\n",
    "* MQTT (UDP): 1KHz, 10KHz, 50KHz\n",
    "\n",
    "## Save characteristics of tests\n",
    "All important settings are contained in the name of the file. We will save them in a separate array. The structure of the name is as follows:\n",
    "\n",
    "```bash\n",
    "root_dir/benchmarks_${DATE}/${ID}_${MODE}-${VALUES IN SMP}-${RATE}-${SENT SMPS}\n",
    "```\n",
    "\n",
    "Thus, we will structure it in the settings_array as follows:\n",
    "\n",
    "* `settings_array[*][0] = ID`\n",
    "* `settings_array[*][1] = MODE`\n",
    "* `settings_array[*][2] = VALUES IN SAMPLE`\n",
    "* `settings_array[*][3] = RATE`\n",
    "* `settings_array[*][4] = TOTAL NUMBER OF SAMPLES`"
   ]
  },
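  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The following minimal sketch shows how such a file name maps onto one entry of `settings_array`. The file name and node type are purely hypothetical; only the regular expression is the one used in the loader cell below:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "\n",
    "# Hypothetical example only -- not one of the benchmark files\n",
    "example_file = \"001_shmem-8-1000-100000_output.csv\"\n",
    "example_node_type = \"nodetype\"  # would normally be parsed from the subdirectory name\n",
    "\n",
    "# Same pattern as in the loader cell below\n",
    "matchObj = re.match(r'.*?(\\d*)_(\\w*)-(\\d*)-(\\d*)-(\\d*)_output.csv', example_file, re.M|re.I)\n",
    "if matchObj:\n",
    "    example_entry = [example_node_type] + list(matchObj.groups())\n",
    "    # -> ['nodetype', '001', 'shmem', '8', '1000', '100000']\n",
    "    #    [0]=node type, [1]=ID, [2]=mode, [3]=values per sample, [4]=rate, [5]=total samples\n",
    "    print(example_entry)"
   ]
  },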
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import re\n",
    "\n",
    "# First, source log\n",
    "\n",
    "# Initialize arrays\n",
    "input_dataset = []\n",
    "output_dataset = []\n",
    "settings_array = []\n",
    "\n",
    "\n",
    "for i, subdir in enumerate(subdirs):\n",
    "    input_dataset.append([])\n",
    "    output_dataset.append([])\n",
    "\n",
    "    # Acquire node type from the directory\n",
    "    matchObj = re.match(r'(\\w*)_[A-Z]', subdir, re.M|re.I)\n",
    "    \n",
    "    # Fill value to array\n",
    "    if matchObj:\n",
    "        node_type = matchObj[1]\n",
    "\n",
    "    # Acquire all tests in that subdirectory\n",
    "    for walk_subdir, dirs, files in sorted(os.walk(\"{}/{}\".format(rootdir, subdir))):\n",
    "        input_dataset.append([])\n",
    "        output_dataset.append([])\n",
    "        settings_array.append([])\n",
    "        \n",
    "        for file in sorted(files):\n",
    "            ############################\n",
    "            ###### SAVE SETTINGS #######\n",
    "            ############################\n",
    "            temp_settings = []\n",
    "            temp_settings.append(node_type)\n",
    "        \n",
    "            # Match settings, as described above\n",
    "            matchObj = re.match(r'.*?(\\d*)_(\\w*)-(\\d*)-(\\d*)-(\\d*)_output.csv', file, re.M|re.I)\n",
    "\n",
    "            # Fill values to array\n",
    "            if matchObj:\n",
    "                for j in range(0,5):\n",
    "                    temp_settings.append(matchObj.group(j + 1))\n",
    "    \n",
    "                # Append array to big array\n",
    "                settings_array[i].append(temp_settings)\n",
    "            \n",
    "            ############################\n",
    "            ######### LOAD DATA ########\n",
    "            ############################\n",
    "      \n",
    "            # Regex to match input files\n",
    "            if re.match(r'.*?_input.csv', file, re.M|re.I):\n",
    "                # Load file \n",
    "                input_dataset[i].append(np.genfromtxt(\"{}/{}/{}\".format(rootdir, subdir, file), delimiter=','))\n",
    "                \n",
    "                print(\"Loaded input dataset from: {}\".format(file))\n",
    "\n",
    "            # Regex to match output files files\n",
    "            elif re.match(r'.*?_output.csv', file, re.M|re.I):\n",
    "                output_dataset[i].append(np.genfromtxt(\"{}/{}/{}\".format(rootdir, subdir, file), delimiter=','))\n",
    "                \n",
    "                print(\"Loaded output dataset from: {}\".format(file))\n",
    "\n",
    "    print(\"Settings for this subdirectory: \")\n",
    "    print(settings_array[i])\n",
    "    print(\"\\n\")\n",
    "\n",
    "    # Small sanity check, are arrays of the same size?\n",
    "    if len(input_dataset[i]) != len(output_dataset[i]):\n",
    "        print(\"Error: There should be as many input files as there are output files!\")\n",
    "        exit();"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Get missed steps from source node\n",
    "..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of missing samples at receive side\n",
    "missed_send_arr = []\n",
    "# Percentage of missed samples\n",
    "perc_miss_send_arr = []\n",
    "\n",
    "# Generate real total and number of missing samples.\n",
    "# Print percentage of missed samples\n",
    "for i, subdir in enumerate(subdirs):\n",
    "    missed_send_arr.append([])\n",
    "    perc_miss_send_arr.append([])\n",
    "    \n",
    "    for (j, csv_vec) in enumerate(input_dataset[i]):\n",
    "        # Get number of missing samples\n",
    "        missed_send_arr[i].append(int(settings_array[i][j][5]) - len(csv_vec))\n",
    "\n",
    "        # Take percentage\n",
    "        perc_miss_send_arr[i].append(round(missed_send_arr[i][j] / int(settings_array[i][j][5]) * 100, 2))\n",
    "        \n",
    "        print(\"Test {} ({}) is missing {} ({}%) of {} in in-file.\"\n",
    "              .format(settings_array[i][j][0], settings_array[i][j][2], missed_send_arr[i][j], \n",
    "                      perc_miss_send_arr[i][j], settings_array[i][j][5]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Get missed steps from destination node\n",
    "..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of missing samples at receive side\n",
    "missed_recv_arr = []\n",
    "# Percentage of missed samples\n",
    "perc_miss_recv_arr = []\n",
    "\n",
    "# Generate real total and number of missing samples.\n",
    "# Print percentage of missed samples\n",
    "for i, subdir in enumerate(subdirs):\n",
    "    missed_recv_arr.append([])\n",
    "    perc_miss_recv_arr.append([])\n",
    "\n",
    "    for (j, csv_vec) in enumerate(output_dataset[i]):\n",
    "\n",
    "        # Get number of missing samples\n",
    "        missed_recv_arr[i].append(int(settings_array[i][j][5]) - len(csv_vec))\n",
    "\n",
    "        # Take percentage\n",
    "        perc_miss_recv_arr[i].append(round(missed_recv_arr[i][j] / int(settings_array[i][j][5]) * 100, 2))\n",
    "\n",
    "        print(\"Test {} ({}) is missing {} ({}%) of {} in out-file.\"\n",
    "              .format(settings_array[i][j][0], settings_array[i][j][2], missed_recv_arr[i][j], \n",
    "                      perc_miss_recv_arr[i][j], settings_array[i][j][5]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Check first and second sample from receive & destination node\n",
    "..."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Check first and second sample\n",
    "\n",
    "first_second_smp_input = []\n",
    "first_second_smp_output = []\n",
    "\n",
    "for i, subdir in enumerate(subdirs):\n",
    "    first_second_smp_input.append([])\n",
    "    first_second_smp_output.append([])\n",
    "    \n",
    "    for (j, csv_vec) in enumerate(input_dataset[i]):\n",
    "        first_second_smp_input[i].append([csv_vec[0][3], csv_vec[1][3]])\n",
    "        print(\"First and second sample of test {} ({}): {} and {}, respectively\".format(settings_array[i][j][0],\n",
    "                                                                                   settings_array[i][j][2],\n",
    "                                                                                   int(first_second_smp_input[i][j][0]),\n",
    "                                                                                   int(first_second_smp_input[i][j][1])))\n",
    "\n",
    "    for (j, csv_vec) in enumerate(output_dataset[i]):\n",
    "        first_second_smp_output[i].append([csv_vec[0][3], csv_vec[1][3]])\n",
    "        print(\"First and second sample of test {} ({}): {} and {}, respectively\".format(settings_array[i][j][0],\n",
    "                                                                                   settings_array[i][j][2],\n",
    "                                                                                   int(first_second_smp_output[i][j][0]),\n",
    "                                                                                   int(first_second_smp_output[i][j][1])))\n",
    "        \n",
    "    print(\"\")\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Compare input and output data sets\n",
    "..."
   ]
  },
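  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "As a small toy illustration of the matching idea used in the next cell (the sequence numbers below are invented and only serve to show how `missing_seq` is built):"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Toy illustration only -- invented sequence numbers, not benchmark data\n",
    "in_seq  = [0, 1, 2, 3, 4, 5]   # sequence numbers logged by the source (in-file)\n",
    "out_seq = [2, 3, 5]            # sequence numbers logged by the destination (out-file)\n",
    "\n",
    "missing = []\n",
    "l = 0\n",
    "for seq in in_seq:\n",
    "    try:\n",
    "        if seq != out_seq[l]:\n",
    "            # Not the next sample the destination saw -> never transferred\n",
    "            missing.append(seq)\n",
    "        else:\n",
    "            l += 1\n",
    "    except IndexError:\n",
    "        # Out-file exhausted; remaining samples are not counted (as in the cell below)\n",
    "        pass\n",
    "\n",
    "print(missing)                               # [0, 1, 4]\n",
    "print(sum(s > out_seq[0] for s in missing))  # 1 sample lost while the connection was up"
   ]
  },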
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "missing_seq = []\n",
    "\n",
    "never_trans_total_arr = []\n",
    "never_trans_after_arr = []\n",
    "\n",
    "perc_never_trans_total_arr = []\n",
    "perc_never_trans_after_arr = []\n",
    "\n",
    "# Loop through input_array, since this is always bigger or equal to output array\n",
    "for i, subdir in enumerate(subdirs):\n",
    "    never_trans_total_arr.append([])\n",
    "    never_trans_after_arr.append([])\n",
    "    \n",
    "    perc_never_trans_total_arr.append([])\n",
    "    perc_never_trans_after_arr.append([])\n",
    "    \n",
    "    missing_seq.append([])\n",
    "    \n",
    "    for (j, csv_vec) in enumerate(input_dataset[i]):    \n",
    "        l = 0\n",
    "        missing_seq[i].append([])\n",
    "        for (k, line) in enumerate(csv_vec):      \n",
    "            try:\n",
    "                if line[3] != output_dataset[i][j][l][3]:\n",
    "                    missing_seq[i][j].append(line[3])\n",
    "                else:\n",
    "                    l += 1\n",
    "\n",
    "            except IndexError:\n",
    "                pass\n",
    "\n",
    "        never_trans_total_arr[i].append(len(missing_seq[i][j]))\n",
    "\n",
    "        never_trans_after_arr[i].append(np.sum(missing_seq[i][j] > first_second_smp_output[i][j][0]))\n",
    "\n",
    "        # Take percentage\n",
    "        perc_never_trans_total_arr[i].append(round(never_trans_total_arr[i][j] / int(settings_array[i][j][4]) * 100, 2))\n",
    "        perc_never_trans_after_arr[i].append(round(never_trans_after_arr[i][j] / int(settings_array[i][j][4]) * 100, 2))\n",
    "\n",
    "        print(\"Test {} ({}): {} ({}%) samples were never transferred \".format(settings_array[i][j][0],\n",
    "                                                                              settings_array[i][j][2],\n",
    "                                                                              never_trans_total_arr[i][j],\n",
    "                                                                              perc_never_trans_total_arr[i][j]))\n",
    "        print(\"{} ({}%) of these after the first sample occured in out-file.\".format(never_trans_after_arr[i][j],\n",
    "                                                                                     perc_never_trans_after_arr[i][j]))\n",
    "\n",
    "        print(\"\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Calculate medians"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "medians = []\n",
    "upper_limit = []\n",
    "lower_limit = []\n",
    "\n",
    "for i, subdir in enumerate(subdirs):\n",
    "    medians.append([])\n",
    "    upper_limit.append([])\n",
    "    lower_limit.append([])\n",
    "\n",
    "    for (j, csv_vec) in enumerate(output_dataset[i]):  \n",
    "        medians[i].append(np.median(csv_vec.transpose()[2]) * 1e6)\n",
    "\n",
    "        if settings['median_plot']['enabled']:\n",
    "            # np.sort(recv[i][j] - enq_send[i][j])[int(np.size(recv[i][j]]) / 2)] would be the approximately the median\n",
    "            # Calculate upper 10% and lower 10%\n",
    "            upper_limit[i].append(abs(medians[i][j] - 1e6 * np.sort(csv_vec.transpose()[2])[int(9 * np.size(csv_vec.transpose()[2]) / 10)]))\n",
    "            lower_limit[i].append(abs(medians[i][j] - 1e6 * np.sort(csv_vec.transpose()[2])[int(1 * np.size(csv_vec.transpose()[2]) / 10)]))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "## Plot data\n",
    "### First, define some functions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define Fancy Box function we use\n",
    "def plot_fancy_box(bottom, height, ax):\n",
    "    top = bottom + height\n",
    "    \n",
    "    p = FancyBboxPatch((left, bottom),\n",
    "                       width,\n",
    "                       height,\n",
    "                       boxstyle=\"round, pad=0.005\",\n",
    "                       \n",
    "                       ec=\"#dbdbdb\", \n",
    "                       fc=\"white\", \n",
    "                       alpha=0.85,\n",
    "                       transform=ax.transAxes\n",
    "                      )\n",
    "    ax.add_patch(p)\n",
    "    \n",
    "    \n",
    "# Define \"find nearest\" function\n",
    "def find_nearest(array, value):\n",
    "    array = np.asarray(array)\n",
    "    idx = (np.abs(array - value)).argmin()\n",
    "    return array[idx], idx"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Import all necessary libraries to plot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "from matplotlib.font_manager import FontProperties\n",
    "from matplotlib.patches import FancyBboxPatch\n",
    "from matplotlib.ticker import MultipleLocator\n",
    "import pylab    \n",
    "from mpl_toolkits.mplot3d import Axes3D\n",
    "import matplotlib.pyplot as plt\n",
    "from matplotlib import cm\n",
    "from matplotlib.ticker import LinearLocator, FormatStrFormatter\n",
    "import matplotlib as mpl\n",
    "import matplotlib.legend as mlegend"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Start with histograms if they are enabled"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "if settings['histograms']['enabled']:\n",
    "    for i, subdir in enumerate(subdirs):\n",
    "        for (j, csv_vec) in enumerate(output_dataset[i]):\n",
    "            # Create figure\n",
    "            fig = plt.figure(num=None, figsize=(12, 4), dpi=90, facecolor='w', edgecolor='k')\n",
    "\n",
    "            # Add plot and set title\n",
    "            ax = fig.add_subplot(111)\n",
    "\n",
    "            # Set grid\n",
    "            ax.set_axisbelow(True)\n",
    "            ax.grid(True, linestyle='--')\n",
    "\n",
    "            x_limit = 0.00005\n",
    "            bins = np.arange(0, 50, 50 / 100)\n",
    "\n",
    "            # Data in plot\n",
    "            # http://www.color-hex.com/color-palette/33602\n",
    "            csv_vec_t = csv_vec.transpose()\n",
    "\n",
    "            ax.hist(csv_vec_t[2] * 1e6, label=settings['histograms']['labels'][0],\n",
    "                    edgecolor='black',\n",
    "                    bins=bins,\n",
    "                    color='#00549f')\n",
    "            ax.axvline(medians[i][j], color='red', linestyle='-', linewidth=1, alpha=0.85)\n",
    "\n",
    "            # Set axis and calculate values above limit\n",
    "            plt.xlim([0,x_limit])\n",
    "\n",
    "            ###################################\n",
    "            # SET TICKS #######################\n",
    "            ###################################\n",
    "            ticks = np.arange(0, x_limit * 1e6 + 1, 2)\n",
    "\n",
    "            nearest, nearest_idx = find_nearest(ticks, medians[i][j])\n",
    "            \n",
    "            ticks = np.append(ticks, medians[i][j])\n",
    "\n",
    "            # Explicitly set labels\n",
    "            labels = []\n",
    "\n",
    "            for value in ticks:\n",
    "                if value == nearest and np.abs(nearest - medians[i][j]) < 200:\n",
    "                    labels.append(\"\")\n",
    "                elif value == (medians[i][j]):\n",
    "                    labels.append(value)\n",
    "                else:\n",
    "                    labels.append(str(int(value)))\n",
    "\n",
    "            plt.yticks(fontsize=10, family='monospace')\n",
    "            plt.xticks(ticks, labels, fontsize=10, family='monospace', rotation=30, horizontalalignment='right', rotation_mode=\"anchor\")\n",
    "\n",
    "            for value in ax.get_xticklabels():\n",
    "                try:\n",
    "                    if int(float(value.get_text())) == int(medians[i][j]):\n",
    "                        value.set_color('red')\n",
    "                except ValueError:\n",
    "                    # We got some empty values. Ignore them\n",
    "                    pass\n",
    "\n",
    "            minorLocator = MultipleLocator(1)\n",
    "            ax.xaxis.set_minor_locator(minorLocator)\n",
    "\n",
    "            ###################################\n",
    "            # CONFIGURE AXIS ##################\n",
    "            ###################################\n",
    "            # Set labels\n",
    "            ax.set_xlabel(settings['histograms']['axis_labels']['x'], fontsize=10, family='monospace', labelpad = 4)\n",
    "            ax.set_ylabel(settings['histograms']['axis_labels']['y'], fontsize=10, family='monospace', labelpad = 6)\n",
    "            \n",
    "            # Set scale\n",
    "            ax.set_yscale('log')\n",
    "\n",
    "            ###################################\n",
    "            # CREATE TEXTBOXES ################\n",
    "            ###################################\n",
    "            off_bigger_50us = round((np.size(csv_vec_t[2][csv_vec_t[2] > x_limit]) / np.size(csv_vec_t[2])) * 100, 2)\n",
    "\n",
    "            offset_text = '$\\mathtt{{t_{{lat}}>50µs: }}${0: >5.2f}% ($\\mathtt{{\\\\max\\\\,t_{{lat}}}}$: {1:>7.2f}µs)'.format(off_bigger_50us, round(np.max(csv_vec_t[2]) * 1e6, 2))\n",
    "\n",
    "            # Create text for missed steps\n",
    "            missed_text  = ' in: {0:6d} ({1:5.2f}%)\\n'.format(missed_send_arr[i][j], perc_miss_send_arr[i][j])\n",
    "            missed_text += 'out: {0:6d} ({1:5.2f}%)'.format(missed_recv_arr[i][j], perc_miss_recv_arr[i][j])\n",
    "\n",
    "            # Create text for missed steps\n",
    "            never_transferred_text  = 'total: {0:5d} ({1:5.2f}%)\\n'.format(never_trans_total_arr[i][j], perc_never_trans_total_arr[i][j])\n",
    "            never_transferred_text += 'while connected: {0:5d} ({1:5.2f}%)'.format(never_trans_after_arr[i][j], perc_never_trans_after_arr[i][j])\n",
    "\n",
    "            # Set font properties for headers and text\n",
    "            font_header = FontProperties()\n",
    "            font_header.set_family('monospace')\n",
    "            font_header.set_weight('bold')\n",
    "            font_header.set_size(9.5)\n",
    "\n",
    "            font_text = FontProperties()\n",
    "            font_text.set_size(9.5)\n",
    "            font_text.set_family('monospace')\n",
    "\n",
    "            # Set box constraints for wrapper and plot wrapper\n",
    "            left, width = .673, .33\n",
    "            right = left + width\n",
    "\n",
    "            plot_fancy_box(bottom = 0.46, height = 0.65, ax = ax)\n",
    "\n",
    "            # Set box constraints for text boxes\n",
    "            left, width = .685, .30\n",
    "            right = left + width\n",
    "\n",
    "            # Offset boxes\n",
    "            plot_fancy_box(bottom = 0.9085, height = 0.085, ax = ax)\n",
    "\n",
    "            ax.text(right, 0.975, offset_text,\n",
    "                    verticalalignment='top', horizontalalignment='right',\n",
    "                    transform=ax.transAxes,\n",
    "                    color='black', fontproperties = font_text)\n",
    "\n",
    "            # Missed steps\n",
    "            plot_fancy_box(bottom = 0.695, height = 0.18, ax = ax)\n",
    "\n",
    "            ax.text(right, 0.868, \"missing samples:\",\n",
    "                    verticalalignment='top', horizontalalignment='right',\n",
    "                    transform=ax.transAxes,\n",
    "                    color='black', fontproperties = font_header)\n",
    "            ax.text(right, 0.804, missed_text,\n",
    "                    verticalalignment='top', horizontalalignment='right',\n",
    "                    transform=ax.transAxes,\n",
    "                    color='black', fontproperties = font_text)\n",
    "\n",
    "            # Never transferred\n",
    "            plot_fancy_box(bottom = 0.487, height = 0.175, ax = ax)\n",
    "\n",
    "            ax.text(right, 0.657, \"samples not transmitted:\",\n",
    "                    verticalalignment='top', horizontalalignment='right',\n",
    "                    transform=ax.transAxes,\n",
    "                    color='black', fontproperties = font_header)\n",
    "            ax.text(right, 0.593, never_transferred_text,\n",
    "                    verticalalignment='top', \n",
    "                    horizontalalignment='right',\n",
    "                    transform=ax.transAxes,\n",
    "                    color='black', fontproperties = font_text)\n",
    "\n",
    "\n",
    "            ###################################\n",
    "            # SAVE PLOT #######################\n",
    "            ###################################\n",
    "            plt.minorticks_on()\n",
    "            plt.tight_layout()\n",
    "\n",
    "            fig.savefig('{}/{}_{}_{}i_{}j.pdf'.format(rootdir, \n",
    "                                                      settings_array[i][j][0], \n",
    "                                                      settings_array[i][j][2], i, j),\n",
    "                        format='pdf')        \n",
    "\n",
    "    ###################################\n",
    "    # CREATE HISTOGRAM LEGEND #########\n",
    "    ###################################\n",
    "    # create a second figure for the legend\n",
    "    figLegend = pylab.figure(figsize = settings['histograms']['dimensions']['legend'])\n",
    "\n",
    "    # produce a legend for the objects in the other figure\n",
    "    pylab.figlegend(*ax.get_legend_handles_labels(), loc = 'upper left',\n",
    "                    prop={'family':'monospace', 'size':'8'}, \n",
    "                    ncol=settings['histograms']['legend_columns'])\n",
    "    \n",
    "    figLegend.savefig(\"{}/legend_histogram.pdf\".format(rootdir), format='pdf')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "if settings['median_plot']['enabled']:\n",
    "    # Create figure and axis\n",
    "    fig_median = plt.figure(num=None, figsize=(12, 4), dpi=90, facecolor='w', edgecolor='k')\n",
    "    ax_median = fig_median.add_subplot(111)\n",
    "\n",
    "    for i, subdir in enumerate(subdirs):\n",
    "\n",
    "        ###################################\n",
    "        # CREATE MEDIAN PLOT ##############\n",
    "        ###################################\n",
    "        x_data = np.array([])\n",
    "        for k in range(0, len(medians[i])):\n",
    "            x_data = np.append(x_data, k)\n",
    "\n",
    "        ax_median.errorbar(x_data, medians[i], yerr=[lower_limit[i], upper_limit[i]],\n",
    "                           capsize = 3.7, elinewidth = 1, markeredgewidth = 1, \n",
    "                           marker='v', zorder = 2 + i, color=settings['median_plot']['colors'][i],\n",
    "                           label=settings['median_plot']['labels'][i])\n",
    "\n",
    "        ###################################\n",
    "        # PRINT MISSED STEPS ##############\n",
    "        ###################################\n",
    "        if settings['median_plot']['print_missed_steps']:\n",
    "            for l, median in enumerate(medians[i]):\n",
    "                \n",
    "                p = FancyBboxPatch((x_data[l] + 0.07, median + 0.08), 0.345, 0.26, boxstyle=\"round, pad=0.005\",\n",
    "                                    ec=\"#dbdbdb\", fc=\"white\", alpha=0.85)\n",
    "                ax_median.add_patch(p)\n",
    "                \n",
    "                ax_median.text(x_data[l] + 0.1, median + 0.15, \"{: >4.2f}%\".format(perc_miss_recv_arr[i][l]))\n",
    "                \n",
    "            # Create bbox patch for legend\n",
    "            #p = FancyBboxPatch((0, 0), 5, 1, boxstyle=\"round, pad=0.5\", ec=\"#dbdbdb\", fc=\"white\", alpha=0.85)\n",
    "        \n",
    "            handles = []\n",
    "            handles.append(p)\n",
    "            text= '% of samples missed by signal generator'\n",
    "            leg2 = mlegend.Legend(ax_median, handles, labels=[text], loc = 'upper left', ncol=1,\n",
    "                                  prop={'family':'monospace', 'size':'8'})\n",
    "\n",
    "  \n",
    "        \n",
    "    ###################################\n",
    "    # SET AXIS OF MEDIAN PLOT #########\n",
    "    ###################################\n",
    "    ax_median.set_xticks(np.arange(0, len(settings['median_plot']['ticks']['x']), 1))\n",
    "    ax_median.set_xticklabels(settings['median_plot']['ticks']['x'])\n",
    "    \n",
    "    if settings['median_plot']['log_scale']:\n",
    "        ax_median.set_yscale('log')\n",
    "    else:\n",
    "        ax_median.set_ylim([settings['median_plot']['ticks']['y'][0], settings['median_plot']['ticks']['y'][-1]])\n",
    "        ax_median.set_yticks(settings['median_plot']['ticks']['y'])\n",
    "        \n",
    "    ax_median.set_xlabel(settings['median_plot']['axis_labels']['x'], fontsize=11, family='monospace', labelpad=6)\n",
    "    ax_median.set_ylabel(settings['median_plot']['axis_labels']['y'], fontsize=11, family='monospace', labelpad=6)\n",
    "    ax_median.set_axisbelow(True)\n",
    "    ax_median.grid(True, linestyle='--')\n",
    "\n",
    "    ax_median.yaxis.grid(True, linestyle='-', which='major', color='black', alpha=0.8)\n",
    "    ax_median.yaxis.grid(True, linestyle='--', which='minor', color='lightgrey', alpha=0.3)\n",
    "\n",
    "    ###################################\n",
    "    # EXPORT MEDIANS AND CREATE #######\n",
    "    # LEGEND OF MEDIAN TABLE ##########\n",
    "    ###################################\n",
    "    plt.tight_layout()\n",
    "    fig_median.savefig('{}/median_graph.pdf'.format(rootdir), dpi=600, format='pdf', bbox_inches='tight')\n",
    "\n",
    "    # create a second figure for the legend\n",
    "    figLegend = pylab.figure(figsize = settings['median_plot']['dimensions']['legend'])\n",
    "    \n",
    "\n",
    "    leg_temp = pylab.figlegend(*ax_median.get_legend_handles_labels(), loc = 'upper left', labelspacing=1.2,\n",
    "                    prop={'family':'monospace', 'size':'8'}, ncol=settings['median_plot']['legend_columns'])\n",
    "    \n",
    "    if settings['median_plot']['print_missed_steps']:\n",
    "        leg_temp._legend_box._children.append(leg2._legend_box._children[1])\n",
    "        leg_temp._legend_box.align=\"left\"\n",
    "        \n",
    "    figLegend.savefig(\"{}/legend_median_plot.pdf\".format(rootdir), format='pdf')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Create 3D-Plot if enabled"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "if settings['3d_plot']['enabled']:\n",
    "    for i, subdir in enumerate(subdirs):\n",
    "        fig_3d = plt.figure(num=None, figsize=(16, 7), dpi=90, facecolor='w', edgecolor='k')\n",
    "        ax_3d = fig_3d.gca(projection='3d')\n",
    "\n",
    "        # Make data.\n",
    "        X = np.array([])\n",
    "        for k in range(0, len(settings['3d_plot']['ticks']['x'])):\n",
    "            X = np.append(X, k)\n",
    "\n",
    "        Y = np.array([])\n",
    "        for k in range(0, len(settings['3d_plot']['ticks']['y'])):\n",
    "            Y = np.append(Y, k)\n",
    "\n",
    "        X, Y = np.meshgrid(X, Y)\n",
    "\n",
    "        Z = np.array([])\n",
    "        for k in range(0, len(settings['3d_plot']['ticks']['y'])):\n",
    "            for l in range(0, len(settings['3d_plot']['ticks']['x'])):\n",
    "                Z = np.append(Z, medians[i][k * len(settings['3d_plot']['ticks']['x']) + l])\n",
    "                \n",
    "        ###################################\n",
    "        # PRINT MISSED STEPS ##############\n",
    "        ###################################\n",
    "        \n",
    "        props = dict(boxstyle='round', facecolor='white', alpha=0.8)\n",
    "        \n",
    "        # if more than 5% of the samples were missed, print it to figure\n",
    "        for k in range(0, len(input_dataset[i])):\n",
    "            if perc_miss_send_arr[i][k] > 5:\n",
    "                x = k % (len(settings['3d_plot']['ticks']['x']))\n",
    "                y = np.floor(k / (len(settings['3d_plot']['ticks']['y'])))\n",
    "                z = Z[k]\n",
    "                \n",
    "                x_delta = 0.65\n",
    "                y_delta = 0.65\n",
    "                z_delta = 0.5 * (Z[k] - Z[k - len(settings['3d_plot']['ticks']['y'])])\n",
    "                ax_3d.text(x - x_delta, y - y_delta, z - z_delta, \"{: >4.2f}%\".format(perc_miss_send_arr[i][k]), \n",
    "                           fontsize=11, family='monospace', color='red', bbox=props)\n",
    "\n",
    "        Z = np.split(Z, len(settings['3d_plot']['ticks']['y']))\n",
    "\n",
    "        # Plot the surface.\n",
    "        surf = ax_3d.plot_surface(X, Y, Z, cmap=cm.Blues, linewidth=135,\n",
    "                                  antialiased=False, shade=True)\n",
    "        ax_3d.plot_wireframe(X, Y, Z, 10, lw=1, colors=\"k\", linestyles=\"solid\")\n",
    "\n",
    "        # Customize the z axis.\n",
    "        ax_3d.set_zlim(0, np.max(np.ceil(Z)))\n",
    "        ax_3d.zaxis.set_major_locator(LinearLocator(10))\n",
    "\n",
    "        ax_3d.set_xlabel(settings['3d_plot']['axis_labels']['x'], fontsize=11, family='monospace', labelpad=14)\n",
    "        ax_3d.set_ylabel(settings['3d_plot']['axis_labels']['y'], fontsize=11, family='monospace', labelpad=8)\n",
    "        ax_3d.set_zlabel(settings['3d_plot']['axis_labels']['z'], fontsize=11, family='monospace', labelpad=8)\n",
    "\n",
    "        ax_3d.set_xticks(np.arange(0, len(settings['3d_plot']['ticks']['x']), 1))\n",
    "        ax_3d.set_xticklabels(settings['3d_plot']['ticks']['x'])\n",
    "\n",
    "        ax_3d.set_yticklabels(settings['3d_plot']['ticks']['y'])\n",
    "        ax_3d.set_zticks(np.arange(0, len(settings['3d_plot']['ticks']['z']), 1))\n",
    "\n",
    "        x = np.argmin(Z) % (len(settings['3d_plot']['ticks']['x']))\n",
    "        y = np.floor(np.argmin(Z) / (len(settings['3d_plot']['ticks']['y'])))\n",
    "        z = np.min(Z)\n",
    "        \n",
    "        ax_3d.plot([x,x],[y,y],z, marker='v', color = 'green', markersize=15, label=\"Minimum: \"+str(z)+ \" µs\")\n",
    "\n",
    "        x = np.argmax(Z) % (len(settings['3d_plot']['ticks']['x']))\n",
    "        y = np.floor(np.argmax(Z) / (len(settings['3d_plot']['ticks']['y'])))\n",
    "        z = np.max(Z)\n",
    "        \n",
    "        ax_3d.plot([x,x],[y,y],z, marker='^', color = 'red', markersize=15, label=\"Maximum: \"+str(z)+ \" µs\")\n",
    "\n",
    "        norm = mpl.colors.Normalize(vmin=np.min(Z), vmax=np.max(Z))\n",
    "        cb = fig_3d.colorbar(surf, shrink=0.8, aspect=10, fraction=0.1, norm=norm)\n",
    "        cb.set_label(settings['3d_plot']['axis_labels']['z'], fontsize=11, family='monospace', labelpad=8)\n",
    "        plt.tight_layout()\n",
    "        plt.show()\n",
    "\n",
    "        fig_3d.savefig('{}/median_3d_graph_{}.pdf'.format(rootdir, settings_array[i][0][2]), dpi=600, format='pdf')\n",
    "\n",
    "        \n",
    "        ###################################\n",
    "        # CREATE LEGEND ###################\n",
    "        ###################################\n",
    "        # create a second figure for the legend\n",
    "        figLegend = pylab.figure(figsize = settings['3d_plot']['dimensions']['legend'])\n",
    "\n",
    "        # The markers are too big, so lets create smaller markers\n",
    "        ax_custom = figLegend.add_subplot(111)\n",
    "        ax_custom.plot(0,0, marker='v', color = 'green', label=\"$\\\\min\\\\,\\\\tilde{t}_{lat}$: \"+str(np.min(Z))+ \" µs\", markersize=8, linestyle = 'None')\n",
    "        ax_custom.plot(0,0, marker='^', color = 'red', label=\"$\\\\max\\\\,\\\\tilde{t}_{lat}$: \"+str(np.max(Z))+ \" µs\", markersize=8, linestyle = 'None')\n",
    "        ax_custom.set_visible(False)\n",
    "\n",
    "        # Create bbox patch for legend\n",
    "        p = FancyBboxPatch((0, 0), 5, 1, boxstyle=\"round, pad=0.5\", ec=\"#dbdbdb\", fc=\"white\", alpha=0.85)\n",
    "        \n",
    "        handles = []\n",
    "        handles.append(p)\n",
    "        text= '% of samples missed by signal generator'\n",
    "        leg2 = mlegend.Legend(ax_custom, handles, labels=[text], loc = 'upper left', ncol=1,\n",
    "                              prop={'family':'monospace', 'size':'8'})\n",
    "\n",
    "        # Extract handles from pseudo plot\n",
    "        handles, labels = ax_custom.get_legend_handles_labels()\n",
    "\n",
    "        leg_temp = pylab.figlegend(handles, labels, loc = 'upper left', labelspacing=1.2,\n",
    "                        prop={'family':'monospace', 'size':'8'}, ncol=settings['3d_plot']['legend_columns'])\n",
    "        \n",
    "        # Concat handles\n",
    "        leg_temp._legend_box._children.append(leg2._legend_box._children[1])\n",
    "        leg_temp._legend_box.align=\"left\"\n",
    "        \n",
    "        # Save figure\n",
    "        figLegend.savefig(\"{}/legend_median_3d_plot_{}.pdf\".format(rootdir, settings_array[i][0][2]), format='pdf')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}