\relax 
\providecommand\hyper@newdestlabel[2]{}
\providecommand\HyperFirstAtBeginDocument{\AtBeginDocument}
\HyperFirstAtBeginDocument{\ifx\hyper@anchor\@undefined
\global\let\oldcontentsline\contentsline
\gdef\contentsline#1#2#3#4{\oldcontentsline{#1}{#2}{#3}}
\global\let\oldnewlabel\newlabel
\gdef\newlabel#1#2{\newlabelxx{#1}#2}
\gdef\newlabelxx#1#2#3#4#5#6{\oldnewlabel{#1}{{#2}{#3}}}
\AtEndDocument{\ifx\hyper@anchor\@undefined
\let\contentsline\oldcontentsline
\let\newlabel\oldnewlabel
\fi}
\fi}
\global\let\hyper@last\relax 
\gdef\HyperFirstAtBeginDocument#1{#1}
\providecommand\HyField@AuxAddToFields[1]{}
\providecommand\HyField@AuxAddToCoFields[2]{}
\citation{Kramer2009}
\citation{Ghosh2011}
\citation{Chen2013}
\citation{Jung2004}
\citation{Mukamel2009}
\citation{Bullmore2009}
\newlabel{FirstPage}{{}{1}{}{section*.1}{}}
\@writefile{toc}{\contentsline {title}{Neural network structure inference from its time series}{1}{section*.2}}
\@writefile{toc}{\contentsline {abstract}{Abstract}{1}{section*.1}}
\newlabel{Introduction}{{I}{1}{}{section*.3}{}}
\@writefile{toc}{\contentsline {section}{\numberline {I}Introduction }{1}{section*.3}}
\@writefile{toc}{\contentsline {subsection}{\numberline {A}(Brief) Motivation}{1}{section*.4}}
\@writefile{toc}{\contentsline {subsection}{\numberline {B}Statement of problem}{1}{section*.5}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces The problem of neural network structure inference from its time series. We observe $N$ neurons (solid circles) whose synaptic connections are represented by solid arrows. Note that the $N$ observed neurons can be influenced by nodes that are not in the observed set (dashed circles). The blue arrows represent a single-neuron recording device (\emph  {e.g.} single-cell voltage-measuring electrodes) that yield time-series data $x_i[t]$ for each observed node. The basic task of ``neural network inference'' is to reconstruct the synaptic connections by computational analysis of the set of single-node measurements $\left \{x_i[t]\right \}$.}}{1}{figure.1}}
\newlabel{fig:statement}{{1}{1}{The problem of neural network structure inference from its time series. We observe $N$ neurons (solid circles) whose synaptic connections are represented by solid arrows. Note that the $N$ observed neurons can be influenced by nodes that are not in the observed set (dashed circles). The blue arrows represent a single-neuron recording device (\emph {e.g.} single-cell voltage-measuring electrodes) that yield time-series data $x_i[t]$ for each observed node. The basic task of ``neural network inference'' is to reconstruct the synaptic connections by computational analysis of the set of single-node measurements $\left \{x_i[t]\right \}$}{figure.1}{}}
\citation{Chen2013}
\citation{Gomez-Rodriguez2010}
\citation{Prakash2012}
\@writefile{toc}{\contentsline {section}{\numberline {II}General questions that motivate the work}{2}{section*.6}}
\newlabel{subsec:coupling_metrics}{{II\tmspace  +\thinmuskip {.1667em}A}{2}{}{section*.7}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {A}Comparison of various coupling metrics}{2}{section*.7}}
\newlabel{eqn:xcorr}{{1}{2}{}{equation.2.1}{}}
\newlabel{subsec:temporal_filtering}{{II\tmspace  +\thinmuskip {.1667em}B}{2}{}{section*.8}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {B}Effect of temporal filtering on network inferrability}{2}{section*.8}}
\newlabel{subsec:complex_topology}{{II\tmspace  +\thinmuskip {.1667em}C}{2}{}{section*.9}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {C}Inference of complex network topology}{2}{section*.9}}
\newlabel{subsubsec:cascades}{{II\tmspace  +\thinmuskip {.1667em}D}{2}{}{section*.10}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {D}Connection to cascades}{2}{section*.10}}
\citation{Izhikevich2003}
\citation{Kramer2009}
\citation{Seth2010}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Overall workflow for the numerical experiments performed in this paper. [A] A neural population of $N$ cells with $M$ excitatory connections is simulated. [B] Optional application of LPF on the neural time traces. [C1,C2] Computation of coupling metric for every node pair. [D] The coupling value for each pair is converted into a $p$-value. [E] The list of $p$-values is used by the FDR method to infer significant edges. [F] The inferred graph is scored against the ground truth.}}{3}{figure.2}}
\newlabel{fig:workflow}{{2}{3}{Overall workflow for the numerical experiments performed in this paper. [A] A neural population of $N$ cells with $M$ excitatory connections is simulated. [B] Optional application of LPF on the neural time traces. [C1,C2] Computation of coupling metric for every node pair. [D] The coupling value for each pair is converted into a $p$-value. [E] The list of $p$-values is used by the FDR method to infer significant edges. [F] The inferred graph is scored against the ground truth}{figure.2}{}}
\@writefile{toc}{\contentsline {section}{\numberline {III}Analysis framework}{3}{section*.11}}
\@writefile{toc}{\contentsline {subsection}{\numberline {A}Numerical model of neuronal dynamics -- Izhikevich neuron model}{3}{section*.12}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Typical neural data as generated by the Izhikevich numerical model. For clarity, ten traces are shown from a simulation of $N=100$ neurons.}}{3}{figure.3}}
\newlabel{fig:izhikevich_sim}{{3}{3}{Typical neural data as generated by the Izhikevich numerical model. For clarity, ten traces are shown from a simulation of $N=100$ neurons}{figure.3}{}}
\citation{Kramer2009}
\citation{Benjamini1995}
\citation{Benjamini1995}
\citation{Kramer2009}
\newlabel{subsec:granger}{{III\tmspace  +\thinmuskip {.1667em}B}{4}{}{section*.13}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {B}Coupling metric for directed edge inference -- pairwise Granger causality}{4}{section*.13}}
\newlabel{eqn:univariate}{{3}{4}{}{equation.3.3}{}}
\newlabel{eqn:bivariate}{{4}{4}{}{equation.3.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {C}FDR-based network inference from multivariate time series}{4}{section*.14}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Individual steps in the FDR-based mechanism for undirected edge inference, shown here for a ground truth neural network with $N=10$ neurons and $M=10$ directed edges. [A] For every pair of neurons in the network, compute the coupling score. In this case, there is a notable coupling between neurons $1$ and $2$, as measured by the max cross-correlation $\mathcal  {C}_{12}\approx 0.15$. [B] Compare the measured cross-correlation against the ``null distribution,'' \emph  {i.e.} the distribution of $\mathcal  {C}_{ij}$ for neurons $i,j$ that are not connected. The comparison of $\mathcal  {C}_{12}$ against the null distribution yields a significance value (a $p$-score) to be associated with the observed score. Here, the measurement of $\mathcal  {C}_{12}\approx 0.15$ is highly significant, given that the null distribution PDF has most of its weight around $0.03<\mathcal  {C}_{ij}<0.08$. [C] Identify the neuron pairs with sufficiently significant $p$-values according to the FDR-procedure of Benjamini and Hochberg (see text). The red dashed line is $q\cdot i/m$ for FDR level $q=0.1$. [D] The results of the particular inference instance. Filled green squares represent true positives (the algorithm inferred a true edge), filled red squares represent false positives (the algorithm inferred an edge falsely), and the unfilled green square represent false negatives (the algorithm failed to infer a true edge). Note that we plot all results in the upper right triangle of the adjacency matrix (where multiple ground truth edges may overlap), since the cross-correlation can only infer undirected edges.}}{5}{figure.4}}
\newlabel{fig:fdr_mechanism}{{4}{5}{Individual steps in the FDR-based mechanism for undirected edge inference, shown here for a ground truth neural network with $N=10$ neurons and $M=10$ directed edges. [A] For every pair of neurons in the network, compute the coupling score. In this case, there is a notable coupling between neurons $1$ and $2$, as measured by the max cross-correlation $\mathcal {C}_{12}\approx 0.15$. [B] Compare the measured cross-correlation against the ``null distribution,'' \emph {i.e.} the distribution of $\mathcal {C}_{ij}$ for neurons $i,j$ that are not connected. The comparison of $\mathcal {C}_{12}$ against the null distribution yields a significance value (a $p$-score) to be associated with the observed score. Here, the measurement of $\mathcal {C}_{12}\approx 0.15$ is highly significant, given that the null distribution PDF has most of its weight around $0.03<\mathcal {C}_{ij}<0.08$. [C] Identify the neuron pairs with sufficiently significant $p$-values according to the FDR-procedure of Benjamini and Hochberg (see text). The red dashed line is $q\cdot i/m$ for FDR level $q=0.1$. [D] The results of the particular inference instance. Filled green squares represent true positives (the algorithm inferred a true edge), filled red squares represent false positives (the algorithm inferred an edge falsely), and the unfilled green square represent false negatives (the algorithm failed to infer a true edge). Note that we plot all results in the upper right triangle of the adjacency matrix (where multiple ground truth edges may overlap), since the cross-correlation can only infer undirected edges}{figure.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {D}Validation of FDR-based edge inference}{5}{section*.15}}
\@writefile{lof}{\contentsline {figure}{\numberline {5}{\ignorespaces Explicit validation of the FDR method. For each FDR level $q$, we simulate $50$ instances of the inference run on a graph with $N=100$ and $M=100$. We then measure the actual FDR (the fraction of false positives in the inferred set). The mean and standard deviation of the actual FDR is presented, which shows the distribution of actual FDRs to be well-bounded by the specified FDR level (dashed diagonal).}}{6}{figure.5}}
\newlabel{fig:fdr_valid}{{5}{6}{Explicit validation of the FDR method. For each FDR level $q$, we simulate $50$ instances of the inference run on a graph with $N=100$ and $M=100$. We then measure the actual FDR (the fraction of false positives in the inferred set). The mean and standard deviation of the actual FDR is presented, which shows the distribution of actual FDRs to be well-bounded by the specified FDR level (dashed diagonal)}{figure.5}{}}
\@writefile{toc}{\contentsline {section}{\numberline {IV}The effect of temporal filtering on edge inference}{6}{section*.16}}
\@writefile{toc}{\contentsline {subsection}{\numberline {A}Undirected inference}{6}{section*.17}}
\@writefile{lof}{\contentsline {figure}{\numberline {6}{\ignorespaces The effect of low pass filtering (here, $\tau =10$ ms) on the neural traces and the cross-correlation. The two neurons ($1$ and $3$) share a synaptic connection in the ground truth network. The unfiltered quantities are plotted in blue, and the filtered results are shown in red.}}{6}{figure.6}}
\newlabel{fig:temporal_filter}{{6}{6}{The effect of low pass filtering (here, $\tau =10$ ms) on the neural traces and the cross-correlation. The two neurons ($1$ and $3$) share a synaptic connection in the ground truth network. The unfiltered quantities are plotted in blue, and the filtered results are shown in red}{figure.6}{}}
\citation{KimPutrino2011}
\citation{Schreiber2000}
\bibdata{paperNotes,bibliography}
\bibcite{Kramer2009}{{1}{2009}{{Kramer\ \emph  {et~al.}}}{{Kramer, Eden, Cash,\ and\ Kolaczyk}}}
\bibcite{Ghosh2011}{{2}{2011}{{Ghosh\ \emph  {et~al.}}}{{Ghosh, Burns, Cocker, Nimmerjahn, Ziv, El~Gamal,\ and\ Schnitzer}}}
\bibcite{Chen2013}{{3}{2013}{{Chen\ \emph  {et~al.}}}{{Chen, Wardill, Sun, Pulver, Renninger, Baohan, Schreiter, Kerr, Orger, Jayaraman, Looger, Svoboda,\ and\ Kim}}}
\bibcite{Jung2004}{{4}{2004}{{Jung\ \emph  {et~al.}}}{{Jung, Mehta, Aksay, Stepnoski,\ and\ Schnitzer}}}
\bibcite{Mukamel2009}{{5}{2009}{{Mukamel\ \emph  {et~al.}}}{{Mukamel, Nimmerjahn,\ and\ Schnitzer}}}
\bibcite{Bullmore2009}{{6}{2009}{{Bullmore\ and\ Sporns}}{{}}}
\bibcite{Gomez-Rodriguez2010}{{7}{2010}{{Gomez-Rodriguez\ \emph  {et~al.}}}{{Gomez-Rodriguez, Leskovec,\ and\ Krause}}}
\bibcite{Prakash2012}{{8}{2012}{{Prakash\ \emph  {et~al.}}}{{Prakash, Yizhar, Grewe, Ramakrishnan, Wang, Goshen, Packer, Peterka, Yuste, Schnitzer,\ and\ Deisseroth}}}
\bibcite{Izhikevich2003}{{9}{2003}{{Izhikevich}}{{}}}
\bibcite{Seth2010}{{10}{2010}{{Seth}}{{}}}
\bibcite{Benjamini1995}{{11}{1995}{{Benjamini\ and\ Hochberg}}{{}}}
\bibcite{KimPutrino2011}{{12}{2011}{{Kim\ \emph  {et~al.}}}{{Kim, Putrino, Ghosh,\ and\ Brown}}}
\bibcite{Schreiber2000}{{13}{2000}{{Schreiber}}{{}}}
\bibstyle{apsrev4-1}
\citation{REVTEX41Control}
\citation{apsrev41Control}
\@writefile{toc}{\contentsline {subsection}{\numberline {B}Directed inference}{7}{section*.18}}
\@writefile{toc}{\contentsline {section}{\numberline {V}Conclusions and future work}{7}{section*.19}}
\@writefile{toc}{\contentsline {section}{\numberline {}References}{7}{section*.20}}
\newlabel{LastBibItem}{{13}{7}{}{section*.20}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {7}{\ignorespaces Variation in the performance of undirected edge inference, when the neural signals are low-pass filtered prior to FDR-based inference. Unfiltered results are shown in blue, results with $\tau =5$\nobreakspace  {}ms LPF are shown in red, and results with $\tau =10$\nobreakspace  {}ms are shown in black. [Left] The FDR-based inference method remains valid even after low-pass filtering the time series; the actual FDRs are bound by the FDR level $q$ independently of filtering. [Center] The degradation in inference performance as measured on a standard ROC plot. [Right] Significant loss in performance is shown due to filtering of the time traces. For a fixed FDR level $q$, the algorithm makes significantly fewer inferences (in order to bound the false discovery rate) which results in a lower overall true positive rate (TPR).}}{8}{figure.7}}
\newlabel{fig:temporal_degradation}{{7}{8}{Variation in the performance of undirected edge inference, when the neural signals are low-pass filtered prior to FDR-based inference. Unfiltered results are shown in blue, results with $\tau =5$~ms LPF are shown in red, and results with $\tau =10$~ms are shown in black. [Left] The FDR-based inference method remains valid even after low-pass filtering the time series; the actual FDRs are bound by the FDR level $q$ independently of filtering. [Center] The degradation in inference performance as measured on a standard ROC plot. [Right] Significant loss in performance is shown due to filtering of the time traces. For a fixed FDR level $q$, the algorithm makes significantly fewer inferences (in order to bound the false discovery rate) which results in a lower overall true positive rate (TPR)}{figure.7}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {8}{\ignorespaces Variation in the performance of directed edge inference, when the neural signals are low-pass filtered prior to FDR-based inference. (See the caption of Fig.\nobreakspace  {}\ref  {fig:temporal_degradation} for details.)}}{8}{figure.8}}
\newlabel{fig:gcausal_temporal_degradation}{{8}{8}{Variation in the performance of directed edge inference, when the neural signals are low-pass filtered prior to FDR-based inference. (See the caption of Fig.~\ref {fig:temporal_degradation} for details.)}{figure.8}{}}
\newlabel{LastPage}{{}{8}{}{}{}}
