analyzers Package

This is the module where all TVB Analyzers are hooked into the framework.

The __all__ attribute defines the modules to be introspected for finding adapters.

bct_adapters

class tvb.adapters.analyzers.bct_adapters.BaseBCT[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

Interface between the Brain Connectivity Toolbox of Olaf Sporns and the TVB framework. This adapter requires BCT to be deployed locally, and Matlab or Octave to be installed separately from TVB. A sketch of the typical launch() override pattern appears after the method list below.

build_connectivity_measure(result, key, connectivity, title='', label_x='', label_y='')[source]
build_float_value_wrapper(result, key, title='')[source]
build_int_value_wrapper(result, key, title='')[source]
static can_be_active()[source]
execute_matlab(matlab_code, data)[source]
get_connectivity(view_model)[source]
get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]
get_required_memory_size(view_model)[source]
launch(view_model)[source]
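
The concrete BCT adapters below (DistanceDBIN, CentralityNodeBinary, etc.) each override launch(). The following is a hypothetical sketch of how such an override might combine the helper methods documented above; the MATLAB snippet, the 'CIJ'/'D' variable names and the weights access are illustrative assumptions, not TVB source:

    from tvb.adapters.analyzers.bct_adapters import BaseBCT


    class ExampleDistanceAdapter(BaseBCT):
        """Hypothetical BCT wrapper computing a binary distance matrix."""

        def launch(self, view_model):
            # Load the Connectivity referenced by the view model (documented helper)
            connectivity = self.get_connectivity(view_model)
            # Hand the weights matrix to BCT via Matlab/Octave;
            # 'CIJ' and 'D' are illustrative variable names
            result = self.execute_matlab("D = distance_bin(CIJ);",
                                         data={'CIJ': connectivity.weights})
            # Wrap the returned matrix as a ConnectivityMeasure linked to the input
            measure = self.build_connectivity_measure(result, 'D', connectivity,
                                                      title="Distance matrix")
            return [measure]
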
class tvb.adapters.analyzers.bct_adapters.BaseBCTForm(prefix='', project_id=None, draw_ranges=True)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.bct_adapters.BaseBCTModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel

connectivity : tvb.adapters.analyzers.bct_adapters.BaseBCTModel.connectivity = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

connectivity

Keep a GID but also link the type of DataType it should point to

class tvb.adapters.analyzers.bct_adapters.BaseUndirected[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

get_form_class()[source]
launch(view_model)[source]
class tvb.adapters.analyzers.bct_adapters.BaseUnidirectedBCTForm(prefix='', project_id=None, draw_ranges=True)[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCTForm

static get_filters()[source]
class tvb.adapters.analyzers.bct_adapters.DistanceDBIN[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_adapters.DistanceDWEI[source]

Bases: tvb.adapters.analyzers.bct_adapters.DistanceDBIN

class tvb.adapters.analyzers.bct_adapters.DistanceNETW[source]

Bases: tvb.adapters.analyzers.bct_adapters.DistanceDBIN

launch(view_model)[source]
class tvb.adapters.analyzers.bct_adapters.DistanceRDA[source]

Bases: tvb.adapters.analyzers.bct_adapters.DistanceRDM

class tvb.adapters.analyzers.bct_adapters.DistanceRDM[source]

Bases: tvb.adapters.analyzers.bct_adapters.DistanceDBIN

launch(view_model)[source]
class tvb.adapters.analyzers.bct_adapters.ModularityOCSM[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_adapters.ModularityOpCSMU[source]

Bases: tvb.adapters.analyzers.bct_adapters.ModularityOCSM

tvb.adapters.analyzers.bct_adapters.bct_description(mat_file_name)[source]

bct_centrality_adapters

class tvb.adapters.analyzers.bct_centrality_adapters.CentralityEdgeBinary[source]

Bases: tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeBinary

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.CentralityEdgeWeighted[source]

Bases: tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeWeighted

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.CentralityEigenVector[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseUndirected

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.CentralityKCoreness[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseUndirected

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.CentralityKCorenessBD[source]

Bases: tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeBinary

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeBinary[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeWeighted[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.CentralityShortcuts[source]

Bases: tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeBinary

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.FlowCoefficients[source]

Bases: tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeBinary

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.ParticipationCoefficient[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.ParticipationCoefficientSign[source]

Bases: tvb.adapters.analyzers.bct_centrality_adapters.ParticipationCoefficient

launch(view_model)[source]
class tvb.adapters.analyzers.bct_centrality_adapters.SubgraphCentrality[source]

Bases: tvb.adapters.analyzers.bct_centrality_adapters.CentralityNodeBinary

launch(view_model)[source]

bct_clustering_adapters

class tvb.adapters.analyzers.bct_clustering_adapters.ClusteringCoefficient[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_clustering_adapters.ClusteringCoefficientBU[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseUndirected

launch(view_model)[source]
class tvb.adapters.analyzers.bct_clustering_adapters.ClusteringCoefficientWD[source]

Bases: tvb.adapters.analyzers.bct_clustering_adapters.ClusteringCoefficient

launch(view_model)[source]
class tvb.adapters.analyzers.bct_clustering_adapters.ClusteringCoefficientWU[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseUndirected

launch(view_model)[source]
class tvb.adapters.analyzers.bct_clustering_adapters.TransitivityBinaryDirected[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_clustering_adapters.TransitivityBinaryUnDirected[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseUndirected

launch(view_model)[source]
class tvb.adapters.analyzers.bct_clustering_adapters.TransitivityWeightedDirected[source]

Bases: tvb.adapters.analyzers.bct_clustering_adapters.TransitivityBinaryDirected

launch(view_model)[source]
class tvb.adapters.analyzers.bct_clustering_adapters.TransitivityWeightedUnDirected[source]

Bases: tvb.adapters.analyzers.bct_clustering_adapters.TransitivityBinaryUnDirected

launch(view_model)[source]

bct_degree_adapters

class tvb.adapters.analyzers.bct_degree_adapters.Degree[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_degree_adapters.DegreeIOD[source]

Bases: tvb.adapters.analyzers.bct_degree_adapters.Degree

launch(view_model)[source]
class tvb.adapters.analyzers.bct_degree_adapters.DensityDirected[source]

Bases: tvb.adapters.analyzers.bct_adapters.BaseBCT

launch(view_model)[source]
class tvb.adapters.analyzers.bct_degree_adapters.DensityUndirected[source]

Bases: tvb.adapters.analyzers.bct_degree_adapters.DensityDirected

class tvb.adapters.analyzers.bct_degree_adapters.JointDegree[source]

Bases: tvb.adapters.analyzers.bct_degree_adapters.Degree

launch(view_model)[source]
class tvb.adapters.analyzers.bct_degree_adapters.MatchingIndex[source]

Bases: tvb.adapters.analyzers.bct_degree_adapters.Degree

launch(view_model)[source]
class tvb.adapters.analyzers.bct_degree_adapters.Strength[source]

Bases: tvb.adapters.analyzers.bct_degree_adapters.Degree

launch(view_model)[source]
class tvb.adapters.analyzers.bct_degree_adapters.StrengthISOS[source]

Bases: tvb.adapters.analyzers.bct_degree_adapters.Strength

launch(view_model)[source]
class tvb.adapters.analyzers.bct_degree_adapters.StrengthWeights[source]

Bases: tvb.adapters.analyzers.bct_degree_adapters.Strength

launch(view_model)[source]

cross_correlation_adapter

Adapter that uses the traits module to generate interfaces for ... Analyzer.

class tvb.adapters.analyzers.cross_correlation_adapter.CorrelationCoefficient(**kwargs)[source]

Bases: tvb.basic.neotraits._core.HasTraits

Model class defining the traited attributes used by the CorrelationCoefficientAdapter.
time_series : tvb.adapters.analyzers.cross_correlation_adapter.CorrelationCoefficient.time_series = Attr(field_type=<class ‘tvb.datatypes.time_series.TimeSeries’>, default=None, required=True)
The time-series for which the cross correlation matrices are calculated.
t_start : tvb.adapters.analyzers.cross_correlation_adapter.CorrelationCoefficient.t_start = Float(field_type=<class ‘float’>, default=0.9765625, required=True)
Time start point (ms). By default it uses the default Monitor sample period. The starting time point of a time series is not zero, but the monitor’s sample period.
t_end : tvb.adapters.analyzers.cross_correlation_adapter.CorrelationCoefficient.t_end = Float(field_type=<class ‘float’>, default=1000.0, required=True)
End time point (ms)

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

t_end

Declares a float. This is different from Attr(field_type=float). The former enforces float subtypes. This allows any type that can be safely cast to the declared float type according to numpy rules.

Reading and writing this attribute is slower than a plain python attribute. In performance sensitive code you might want to use plain python attributes or even better local variables.

t_start

Declares a float. This is different from Attr(field_type=float). The former enforces float subtypes. This allows any type that can be safely cast to the declared float type according to numpy rules.

Reading and writing this attribute is slower than a plain python attribute. In performance sensitive code you might want to use plain python attributes or even better local variables.

time_series

An Attr declares the following about the attribute it describes: the type, a default value shared by all instances, whether the value might be missing, and documentation. It will resolve to attributes on the instance.

class tvb.adapters.analyzers.cross_correlation_adapter.CrossCorrelate(**kwargs)[source]

Bases: tvb.basic.neotraits._core.HasTraits

Model class defining the traited attributes used by the CrossCorrelateAdapter.
time_series : tvb.adapters.analyzers.cross_correlation_adapter.CrossCorrelate.time_series = Attr(field_type=<class ‘tvb.datatypes.time_series.TimeSeries’>, default=None, required=True)
The time-series for which the cross correlation sequences are calculated.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

An Attr declares the following about the attribute it describes: the type, a default value shared by all instances, whether the value might be missing, and documentation. It will resolve to attributes on the instance.

class tvb.adapters.analyzers.cross_correlation_adapter.CrossCorrelateAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the CrossCorrelate algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage.

Parameters: time_series – the input time-series index for which cross correlation should be computed
get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Returns the required memory to be able to run the adapter.

launch(view_model)[source]

Launch algorithm and build results. Compute the node-pairwise cross-correlation of the source 4D TimeSeries represented by the index given as input.

Return a CrossCorrelationIndex. Create a CrossCorrelationH5 that contains the cross-correlation sequences for all possible combinations of the nodes.

See: http://www.scipy.org/doc/api_docs/SciPy.signal.signaltools.html#correlate

Parameters: time_series – the input time series index for which the correlation should be computed
Returns: the cross correlation index for the given time series
Return type: CrossCorrelationIndex
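
A minimal usage sketch, assuming the adapter can be driven directly with an existing TimeSeries index GID (in a deployed installation adapters are normally launched through TVB's operation machinery):

    import uuid

    from tvb.adapters.analyzers.cross_correlation_adapter import (
        CrossCorrelateAdapter, CrossCorrelateAdapterModel)


    def run_cross_correlation(time_series_gid: uuid.UUID):
        """Configure and launch the adapter for a TimeSeries identified by its GID."""
        view_model = CrossCorrelateAdapterModel()
        view_model.time_series = time_series_gid   # DataTypeGidAttr holds a GID (UUID)

        adapter = CrossCorrelateAdapter()
        adapter.configure(view_model)              # stores the input shape for size estimates
        return adapter.launch(view_model)          # documented to return a CrossCorrelationIndex
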
class tvb.adapters.analyzers.cross_correlation_adapter.CrossCorrelateAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.cross_correlation_adapter.CrossCorrelateAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.adapters.analyzers.cross_correlation_adapter.CrossCorrelate

time_series : tvb.adapters.analyzers.cross_correlation_adapter.CrossCorrelateAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The time-series for which the cross correlation sequences are calculated.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

class tvb.adapters.analyzers.cross_correlation_adapter.PearsonCorrelationCoefficientAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the Pearson correlation coefficients algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage.

Parameters:
  • time_series – the input time-series index for which correlation coefficient should be computed
  • t_start – the physical time interval start for the analysis
  • t_end – physical time, interval end
get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Returns the required memory to be able to run this adapter.

launch(view_model)[source]

Launch algorithm and build results. Compute the node-pairwise pearson correlation coefficient of the given input 4D TimeSeries datatype.

The result will contain values between -1 and 1, inclusive.

Parameters:
  • time_series – the input time-series for which correlation coefficient should be computed
  • t_start – the physical time interval start for the analysis
  • t_end – physical time, interval end
Returns:

the correlation coefficient for the given time series

Return type:

CorrelationCoefficients

class tvb.adapters.analyzers.cross_correlation_adapter.PearsonCorrelationCoefficientAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.cross_correlation_adapter.PearsonCorrelationCoefficientAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.adapters.analyzers.cross_correlation_adapter.CorrelationCoefficient

time_series : tvb.adapters.analyzers.cross_correlation_adapter.PearsonCorrelationCoefficientAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The time-series for which the cross correlation matrices are calculated.
t_start : tvb.adapters.analyzers.cross_correlation_adapter.CorrelationCoefficient.t_start = Float(field_type=<class ‘float’>, default=0.9765625, required=True)
Time start point (ms). By default it uses the default Monitor sample period. The starting time point of a time series is not zero, but the monitor’s sample period.
t_end : tvb.adapters.analyzers.cross_correlation_adapter.CorrelationCoefficient.t_end = Float(field_type=<class ‘float’>, default=1000.0, required=True)
End time point (ms)

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

fcd_adapter

Adapter that uses the traits model to generate interfaces for FCD Analyzer.

class tvb.adapters.analyzers.fcd_adapter.FCDAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.fcd_adapter.FCDAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.adapters.analyzers.fcd_adapter.FcdCalculator

time_series : tvb.adapters.analyzers.fcd_adapter.FCDAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The time-series for which the fcd matrices are calculated.
sw : tvb.adapters.analyzers.fcd_adapter.FcdCalculator.sw = Float(field_type=<class ‘float’>, default=120000, required=True)
Length of the time window used to divide the time series. The FCD matrix is calculated in the following way: the time series is divided into time windows of fixed length and fixed overlap. The data-points within each window, centered at time ti, are used to calculate FC(ti) as a Pearson correlation. The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj), each arranged as a vector.
sp : tvb.adapters.analyzers.fcd_adapter.FcdCalculator.sp = Float(field_type=<class ‘float’>, default=2000, required=True)
Spanning = (time window length) - (overlap between two consecutive time windows). The FCD matrix is calculated in the following way: the time series is divided into time windows of fixed length and fixed overlap. The data-points within each window, centered at time ti, are used to calculate FC(ti) as a Pearson correlation. The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj), each arranged as a vector.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

class tvb.adapters.analyzers.fcd_adapter.FcdCalculator(**kwargs)[source]

Bases: tvb.basic.neotraits._core.HasTraits

Model class defining the traited attributes used by the FcdAdapter.
time_series : tvb.adapters.analyzers.fcd_adapter.FcdCalculator.time_series = Attr(field_type=<class ‘tvb.datatypes.time_series.TimeSeriesRegion’>, default=None, required=True)
The time-series for which the fcd matrices are calculated.
sw : tvb.adapters.analyzers.fcd_adapter.FcdCalculator.sw = Float(field_type=<class ‘float’>, default=120000, required=True)
Length of the time window used to divide the time series. The FCD matrix is calculated in the following way: the time series is divided into time windows of fixed length and fixed overlap. The data-points within each window, centered at time ti, are used to calculate FC(ti) as a Pearson correlation. The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj), each arranged as a vector.
sp : tvb.adapters.analyzers.fcd_adapter.FcdCalculator.sp = Float(field_type=<class ‘float’>, default=2000, required=True)
Spanning = (time window length) - (overlap between two consecutive time windows). The FCD matrix is calculated in the following way: the time series is divided into time windows of fixed length and fixed overlap. The data-points within each window, centered at time ti, are used to calculate FC(ti) as a Pearson correlation. The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj), each arranged as a vector.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

sp

Declares a float. This is different from Attr(field_type=float). The former enforces float subtypes. This allows any type that can be safely cast to the declared float type according to numpy rules.

Reading and writing this attribute is slower than a plain python attribute. In performance sensitive code you might want to use plain python attributes or even better local variables.

sw

Declares a float. This is different from Attr(field_type=float). The former enforces float subtypes. This allows any type that can be safely cast to the declared float type according to numpy rules.

Reading and writing this attribute is slower than a plain python attribute. In performance sensitive code you might want to use plain python attributes or even better local variables.

time_series

An Attr declares the following about the attribute it describes: the type, a default value shared by all instances, whether the value might be missing, and documentation. It will resolve to attributes on the instance.

class tvb.adapters.analyzers.fcd_adapter.FunctionalConnectivityDynamicsAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the Functional Connectivity Dynamics (FCD) algorithm.

The present class will do the following actions:

  • Compute the FCD of the time series. The FCD is calculated as follows:

    the time series is divided into time windows of fixed length and fixed overlap; the data-points within each window, centered at time ti, are used to calculate FC(ti) as a Pearson correlation; the ij element of the FCD matrix is the Pearson correlation between FC(ti) and FC(tj), each arranged as a vector.

  • Apply the spectral embedding algorithm to the FCD in order to identify epochs of stability of the FCD

    (periods of time during which the FC matrices are highly correlated).

The algorithm can produce two kinds of results:

  • case 1: the algorithm is able to identify epochs of stability

    The FCs are calculated over the epochs of stability (the first epoch is excluded as an artifact of the initial conditions), and the 3 eigenvectors associated with the 3 largest eigenvalues of each FC are extracted.

  • case 2: the algorithm is not able to identify epochs of stability

    The FC over the whole time series is calculated, and the 3 eigenvectors associated with the 3 largest eigenvalues of that FC are extracted.

:return
  • the FCD matrix, whose values are between -1 and 1, inclusive

  • a segmented FCD matrix, whose values are between -1 and 1.1, inclusive

    (in case 1, the value 1.1 marks time points not belonging to epochs of stability identified by the spectral embedding algorithm; in case 2, the segmented FCD matrix is identical to the unsegmented FCD matrix)

  • a dictionary containing the eigenvectors

  • a dictionary containing the eigenvalues

  • the connectivity associated with the TimeSeriesRegion

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

Parameters:
  • time_series – the input time-series for which fcd matrix should be computed
  • sw – length of the sliding window
  • sp – spanning time: distance between two consecutive sliding window
get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]
get_required_memory_size(view_model)[source]
launch(view_model)[source]

Launch algorithm and build results.

Parameters:
  • time_series – the input time-series index for which fcd matrix should be computed
  • sw – length of the sliding window
  • sp – spanning time: distance between two consecutive sliding window
Returns:

the fcd index for the computed fcd matrix on the given time-series, with that sw and that sp

Return type:

FcdIndex, ConnectivityMeasureIndex
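
A usage sketch for the adapter above, under the same assumption of direct invocation; the window length (sw) and spanning (sp) values are the documented defaults, assumed to be expressed in the time unit of the source TimeSeriesRegion:

    import uuid

    from tvb.adapters.analyzers.fcd_adapter import (
        FCDAdapterModel, FunctionalConnectivityDynamicsAdapter)


    def run_fcd(region_ts_gid: uuid.UUID, sw=120000.0, sp=2000.0):
        """Sliding-window FCD on a TimeSeriesRegion identified by its GID."""
        view_model = FCDAdapterModel()
        view_model.time_series = region_ts_gid   # GID of a TimeSeriesRegion index
        view_model.sw = sw                       # length of the sliding window
        view_model.sp = sp                       # spanning: window length minus overlap
        adapter = FunctionalConnectivityDynamicsAdapter()
        adapter.configure(view_model)            # stores shape, creates the algorithm instance
        return adapter.launch(view_model)        # FcdIndex plus associated measures
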

fmri_balloon_adapter

Adapter that uses the traits module to generate interfaces for BalloonModel Analyzer.

class tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the BalloonModel algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

launch(view_model)[source]

Launch algorithm and build results.

Parameters: time_series – the input time-series used as neural activation in the Balloon Model
Returns: the simulated BOLD signal
Return type: TimeSeries
class tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel

time_series : tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The timeseries that represents the input neural activity
dt : tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapterModel.dt = Float(field_type=<class ‘float’>, default=0.002, required=True)
The integration time step size for the balloon model (s). If none is provided, by default, the TimeSeries sample period is used.
neural_input_transformation : tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapterModel.neural_input_transformation = Attr(field_type=<class ‘str’>, default=’none’, required=True)
This represents the operation to perform on the state-variable(s) of the model used to generate the input TimeSeries: none takes the first state-variable as neural input; abs_diff is the absolute value of the derivative (first order difference) of the first state variable; sum sums all the state-variables of the input TimeSeries.
bold_model : tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapterModel.bold_model = Attr(field_type=<class ‘str’>, default=’nonlinear’, required=True)
Select the set of equations for the BOLD model.
RBM : tvb.adapters.analyzers.fmri_balloon_adapter.BalloonModelAdapterModel.RBM = Attr(field_type=<class ‘bool’>, default=True, required=True)
Select classical vs revised BOLD model (CBM or RBM). Coefficients k1, k2 and k3 will be derived accordingly.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

RBM

An Attr declares the following about the attribute it describes: the type, a default value shared by all instances, whether the value might be missing, and documentation. It will resolve to attributes on the instance.

bold_model

An Attr declares the following about the attribute it describes: the type, a default value shared by all instances, whether the value might be missing, and documentation. It will resolve to attributes on the instance.

dt

Declares a float. This is different from Attr(field_type=float). The former enforces float subtypes. This allows any type that can be safely cast to the declared float type according to numpy rules.

Reading and writing this attribute is slower than a plain python attribute. In performance sensitive code you might want to use plain python attributes or even better local variables.

neural_input_transformation

An Attr declares the following about the attribute it describes: the type, a default value shared by all instances, whether the value might be missing, and documentation. It will resolve to attributes on the instance.

time_series

Keep a GID but also link the type of DataType it should point to
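
A sketch of populating BalloonModelAdapterModel with the attributes documented above, again assuming direct invocation; the GID argument is a placeholder for an existing neural-activity TimeSeries index:

    import uuid

    from tvb.adapters.analyzers.fmri_balloon_adapter import (
        BalloonModelAdapter, BalloonModelAdapterModel)


    def simulate_bold(neural_ts_gid: uuid.UUID):
        """Derive a simulated BOLD TimeSeries from neural activity."""
        view_model = BalloonModelAdapterModel()
        view_model.time_series = neural_ts_gid            # neural activity TimeSeries GID
        view_model.dt = 0.002                             # integration step (s), documented default
        view_model.neural_input_transformation = 'none'   # or 'abs_diff' / 'sum'
        view_model.bold_model = 'nonlinear'               # set of BOLD equations
        view_model.RBM = True                             # revised vs classical coefficients
        adapter = BalloonModelAdapter()
        adapter.configure(view_model)
        return adapter.launch(view_model)                 # simulated BOLD TimeSeries
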

fourier_adapter

Adapter that uses the traits module to generate interfaces for FFT Analyzer.

class tvb.adapters.analyzers.fourier_adapter.FFTAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.fourier_adapter.FFTAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.analyzers.fft.FFT

Parameters have the following meaning:

  • time_series – the input time series to which the FFT is to be applied
  • segment_length – the block size which determines the frequency resolution of the resulting power spectra
  • window_function – windowing functions can be applied before the FFT is performed
  • detrend – None by default; specifies whether detrending is performed on the time series
time_series : tvb.adapters.analyzers.fourier_adapter.FFTAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The TimeSeries to which the FFT is to be applied.
segment_length : tvb.analyzers.fft.FFT.segment_length = Float(field_type=<class ‘float’>, default=1000.0, required=False)
The TimeSeries can be segmented into equally sized blocks (overlapping if necessary). The segment length determines the frequency resolution of the resulting power spectra – longer windows produce finer frequency resolution.
window_function : tvb.analyzers.fft.FFT.window_function = Attr(field_type=<class ‘str’>, default=None, required=False)
Windowing functions can be applied before the FFT is performed. Default is None, possibilities are: ‘hamming’; ‘bartlett’; ‘blackman’; and ‘hanning’. See, numpy.<function_name>.
detrend : tvb.analyzers.fft.FFT.detrend = Attr(field_type=<class ‘bool’>, default=True, required=False)
Detrending is not always appropriate. Default is True, False means no detrending is performed on the time series

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

class tvb.adapters.analyzers.fourier_adapter.FourierAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the FFT algorithm.

configure(view_model)[source]

Do any configuration needed before launching.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Returns the required memory to be able to run the adapter.

launch(view_model)[source]

Launch algorithm and build results.

Parameters:
  • time_series – the input time series to which the fft is to be applied
  • segment_length – the block size which determines the frequency resolution of the resulting power spectra
  • window_function (None; ‘hamming’; ‘bartlett’; ‘blackman’; ‘hanning’) – windowing functions can be applied before the FFT is performed
Returns:

the fourier spectrum for the specified time series

Return type:

FourierSpectrumIndex
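
A sketch of a windowed FFT run using the attributes documented for FFTAdapterModel; values other than the GID are the documented defaults or listed options, and direct invocation is assumed:

    import uuid

    from tvb.adapters.analyzers.fourier_adapter import FFTAdapterModel, FourierAdapter


    def run_fft(ts_gid: uuid.UUID):
        """Windowed FFT of a TimeSeries identified by its GID."""
        view_model = FFTAdapterModel()
        view_model.time_series = ts_gid          # GID of the source TimeSeries index
        view_model.segment_length = 1000.0       # block size; sets the frequency resolution
        view_model.window_function = 'hamming'   # 'hamming', 'bartlett', 'blackman' or 'hanning'
        view_model.detrend = True                # detrend the time series before the FFT
        adapter = FourierAdapter()
        adapter.configure(view_model)
        return adapter.launch(view_model)        # FourierSpectrumIndex
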

ica_adapter

Adapter that uses the traits module to generate interfaces for ICA Analyzer.

class tvb.adapters.analyzers.ica_adapter.ICAAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the ICA algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

launch(view_model)[source]

Launch algorithm and build results.

class tvb.adapters.analyzers.ica_adapter.ICAAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.ica_adapter.ICAAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.analyzers.ica.FastICA

time_series : tvb.adapters.analyzers.ica_adapter.ICAAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The timeseries to which the ICA is to be applied.
n_components : tvb.analyzers.ica.FastICA.n_components = Int(field_type=<class ‘int’>, default=None, required=False)
Number of principal components to unmix.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

matlab_worker

This module implements a class for executing arbitrary MATLAB code.

Conversion between Python types and MATLAB types is handled by, and depends on, scipy.io’s loadmat and savemat functions.

class tvb.adapters.analyzers.matlab_worker.MatlabWorker[source]

Bases: builtins.object

MatlabAnalyzer is a helper class for calling arbitrary MATLAB code with arbitrary parameters.

Specific analyzers should derive from this class and implement the interface and launch methods inherited from Asynchronous Adapter.

add_to_path(path_to_add)[source]

Add a path to the list of paths that will be added to the path in the MATLAB session

cleanup()[source]

Make sure Matlab is closed after execution.

matlab(code, data=None, work_dir=None, cleanup=True)[source]

The matlab method takes the following arguments:

  • code – MATLAB code, as a string
  • data – a dict of data that scipy.io.savemat knows how to deal with
  • work_dir – working directory to be used by MATLAB
  • cleanup – set to False to keep the intermediate files

and returns a tuple:

  • [0] the string of code exec’d by MATLAB
  • [1] the string of the log produced by MATLAB
  • [2] a dict of data from MATLAB’s workspace
matlab_paths = []
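
A sketch of the round trip described above; the MATLAB snippet, the BCT path and the variable names are illustrative assumptions:

    import numpy as np

    from tvb.adapters.analyzers.matlab_worker import MatlabWorker

    worker = MatlabWorker()
    worker.add_to_path('/path/to/BCT')            # make BCT functions visible inside MATLAB

    # data goes out through scipy.io.savemat and comes back through loadmat
    code, log, workspace = worker.matlab("D = distance_bin(CIJ);",
                                         data={'CIJ': np.eye(4)})
    print(log)                                    # console output produced by MATLAB/Octave
    distance_matrix = workspace['D']              # variables read back from the workspace
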

metrics_group_timeseries

Adapter that uses the traits module to generate interfaces for group of Analyzer used to calculate a single measure for TimeSeries.

class tvb.adapters.analyzers.metrics_group_timeseries.TimeseriesMetricsAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for exposing the time-series measure algorithms as a group.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

input_shape = ()
launch(view_model)[source]

Launch algorithm and build results.

Parameters:
  • time_series – the time series on which the algorithms are run
  • algorithms (any subclass of BaseTimeseriesMetricAlgorithm (KuramotoIndex, GlobalVariance, VarianceNodeVariance)) – the algorithms to be run for computing measures on the time series
Return type:

DatatypeMeasureIndex

class tvb.adapters.analyzers.metrics_group_timeseries.TimeseriesMetricsAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_extra_algorithm_filters()[source]
static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.metrics_group_timeseries.TimeseriesMetricsAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.analyzers.metrics_base.BaseTimeseriesMetricAlgorithm

time_series : tvb.adapters.analyzers.metrics_group_timeseries.TimeseriesMetricsAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The TimeSeries for which the metric(s) will be computed.
algorithms : tvb.adapters.analyzers.metrics_group_timeseries.TimeseriesMetricsAdapterModel.algorithms = List(of=<class ‘str’>, default=(), required=True)
The selected algorithms will all be applied on the input TimeSeries
start_point : tvb.analyzers.metrics_base.BaseTimeseriesMetricAlgorithm.start_point = Float(field_type=<class ‘float’>, default=500.0, required=False)
The start point determines how many points of the TimeSeries will be discarded before computing the metric. By default it drops the first 500 ms.
segment : tvb.analyzers.metrics_base.BaseTimeseriesMetricAlgorithm.segment = Int(field_type=<class ‘int’>, default=4, required=False)
Divide the input time-series into discrete equally sized sequences and use the last segment to compute the metric. It is only used when the start point is larger than the time-series length.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

algorithms

The attribute is a list of values. Choices and type are reinterpreted as applying not to the list but to the elements of it

time_series

Keep a GID but also link the type of DataType it should point to
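
A sketch of running several metrics at once; the algorithm names are the ones listed for the launch method above, and direct invocation is assumed:

    import uuid

    from tvb.adapters.analyzers.metrics_group_timeseries import (
        TimeseriesMetricsAdapter, TimeseriesMetricsAdapterModel)


    def compute_metrics(ts_gid: uuid.UUID):
        """Compute scalar measures for one TimeSeries."""
        view_model = TimeseriesMetricsAdapterModel()
        view_model.time_series = ts_gid
        view_model.algorithms = ['GlobalVariance', 'KuramotoIndex']  # subset of the listed metrics
        view_model.start_point = 500.0     # ms discarded at the start of the series
        adapter = TimeseriesMetricsAdapter()
        adapter.configure(view_model)
        return adapter.launch(view_model)  # DatatypeMeasureIndex
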

node_coherence_adapter

Adapter that uses the traits module to generate interfaces for the NodeCoherence Analyzer.

class tvb.adapters.analyzers.node_coherence_adapter.NodeCoherenceAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the NodeCoherence algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

launch(view_model)[source]

Launch algorithm and build results.

class tvb.adapters.analyzers.node_coherence_adapter.NodeCoherenceForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.node_coherence_adapter.NodeCoherenceModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.analyzers.node_coherence.NodeCoherence

time_series : tvb.adapters.analyzers.node_coherence_adapter.NodeCoherenceModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The timeseries to which the FFT is to be applied.
nfft : tvb.analyzers.node_coherence.NodeCoherence.nfft = Int(field_type=<class ‘int’>, default=256, required=True)
Should be a power of 2...

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

node_complex_coherence_adapter

Adapter that uses the traits module to generate interfaces for the NodeComplexCoherence Analyzer.

class tvb.adapters.analyzers.node_complex_coherence_adapter.NodeComplexCoherenceAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the NodeComplexCoherence algorithm.

configure(view_model)[source]

Do any configuration needed before launching and create an instance of the algorithm.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

launch(view_model)[source]

Launch algorithm and build results.

class tvb.adapters.analyzers.node_complex_coherence_adapter.NodeComplexCoherenceForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.node_complex_coherence_adapter.NodeComplexCoherenceModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.analyzers.node_complex_coherence.NodeComplexCoherence

time_series : tvb.adapters.analyzers.node_complex_coherence_adapter.NodeComplexCoherenceModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The timeseries for which the CrossCoherence and ComplexCoherence is to be computed.
epoch_length : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.epoch_length = Float(field_type=<class ‘float’>, default=1000.0, required=False)
In general for lengthy EEG recordings (~30 min), the timeseries are divided into equally sized segments (~ 20-40s). These contain the event that is to be characterized by means of the cross coherence. Additionally each epoch block will be further divided into segments to which the FFT will be applied.
segment_length : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.segment_length = Float(field_type=<class ‘float’>, default=500.0, required=False)
The timeseries can be segmented into equally sized blocks (overlapping if necessary). The segment length determines the frequency resolution of the resulting power spectra – longer windows produce finer frequency resolution.
segment_shift : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.segment_shift = Float(field_type=<class ‘float’>, default=250.0, required=False)
Time length by which neighboring segments are shifted. e.g. segment shift = segment_length / 2 means 50% overlapping segments.
window_function : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.window_function = Attr(field_type=<class ‘str’>, default=’hanning’, required=False)
Windowing functions can be applied before the FFT is performed. Default is hanning, possibilities are: ‘hamming’; ‘bartlett’; ‘blackman’; and ‘hanning’. See, numpy.<function_name>.
average_segments : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.average_segments = Attr(field_type=<class ‘bool’>, default=True, required=False)
Flag. If True, compute the mean Cross Spectrum across segments.
subtract_epoch_average : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.subtract_epoch_average = Attr(field_type=<class ‘bool’>, default=True, required=False)
Flag. If True and if the number of epochs is > 1, you can optionally subtract the mean across epochs before computing the complex coherence.
zeropad : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.zeropad = Int(field_type=<class ‘int’>, default=0, required=False)
Adds n zeros at the end of each segment and at the end of window_function. It is not yet functional.
detrend_ts : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.detrend_ts = Attr(field_type=<class ‘bool’>, default=False, required=False)
Flag. If True removes linear trend along the time dimension before applying FFT.
max_freq : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.max_freq = Float(field_type=<class ‘float’>, default=1024.0, required=False)
Maximum frequency points (e.g. 32., 64., 128.) represented in the output. Default is segment_length / 2 + 1.
npat : tvb.analyzers.node_complex_coherence.NodeComplexCoherence.npat = Float(field_type=<class ‘float’>, default=1.0, required=False)
This attribute appears to be related to an input projection matrix... which is not yet implemented.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to
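
A sketch combining the documented epoch/segment attributes into a typical configuration (1 s epochs, 0.5 s segments with 50% overlap), assuming direct invocation:

    import uuid

    from tvb.adapters.analyzers.node_complex_coherence_adapter import (
        NodeComplexCoherenceAdapter, NodeComplexCoherenceModel)


    def run_complex_coherence(ts_gid: uuid.UUID):
        """Cross and complex coherence of a TimeSeries identified by its GID."""
        view_model = NodeComplexCoherenceModel()
        view_model.time_series = ts_gid
        view_model.epoch_length = 1000.0       # ms per epoch
        view_model.segment_length = 500.0      # ms per FFT segment
        view_model.segment_shift = 250.0       # 50% overlapping segments
        view_model.window_function = 'hanning'
        view_model.average_segments = True     # mean cross spectrum across segments
        adapter = NodeComplexCoherenceAdapter()
        adapter.configure(view_model)
        return adapter.launch(view_model)
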

node_covariance_adapter

Adapter that uses the traits module to generate interfaces for the NodeCovariance Analyzer.

class tvb.adapters.analyzers.node_covariance_adapter.NodeCovariance(**kwargs)[source]

Bases: tvb.basic.neotraits._core.HasTraits

Model class defining the traited attributes used by the NodeCovarianceAdapter.
time_series : tvb.adapters.analyzers.node_covariance_adapter.NodeCovariance.time_series = Attr(field_type=<class ‘tvb.datatypes.time_series.TimeSeries’>, default=None, required=True)
The timeseries to which the NodeCovariance is to be applied.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

An Attr declares the following about the attribute it describes: the type, a default value shared by all instances, whether the value might be missing, and documentation. It will resolve to attributes on the instance.

class tvb.adapters.analyzers.node_covariance_adapter.NodeCovarianceAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the NodeCovariance algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

launch(view_model)[source]

Launch algorithm and build results.

Returns: the CovarianceIndex built with the given time_series index as source
class tvb.adapters.analyzers.node_covariance_adapter.NodeCovarianceAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.node_covariance_adapter.NodeCovarianceAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.adapters.analyzers.node_covariance_adapter.NodeCovariance

time_series : tvb.adapters.analyzers.node_covariance_adapter.NodeCovarianceAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The timeseries to which the NodeCovariance is to be applied.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

pca_adapter

Adapter that uses the traits module to generate interfaces for the PCA Analyzer.

class tvb.adapters.analyzers.pca_adapter.PCAAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the PCA algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

launch(view_model)[source]

Launch algorithm and build results.

Returns: the PrincipalComponents object built with the given timeseries as source
class tvb.adapters.analyzers.pca_adapter.PCAAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.pca_adapter.PCAAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.analyzers.pca.PCA

time_series : tvb.adapters.analyzers.pca_adapter.PCAAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The timeseries to which the PCA is to be applied. NOTE: The TimeSeries must be longer (more time-points) than the number of nodes. This is mostly a problem for surface time-series, which, if sampled at 1024Hz, would need to be greater than 16 seconds long.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to

wavelet_adapter

Adapter that uses the traits module to generate interfaces for ContinuousWaveletTransform Analyzer.

class tvb.adapters.analyzers.wavelet_adapter.ContinuousWaveletTransformAdapter[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapter

TVB adapter for calling the ContinuousWaveletTransform algorithm.

configure(view_model)[source]

Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

get_form_class()[source]
get_output()[source]
get_required_disk_size(view_model)[source]

Returns the required disk size to be able to run the adapter (in kB).

get_required_memory_size(view_model)[source]

Return the required memory to run this algorithm.

launch(view_model)[source]

Launch algorithm and build results.

class tvb.adapters.analyzers.wavelet_adapter.ContinuousWaveletTransformAdapterForm(prefix='', project_id=None)[source]

Bases: tvb.core.adapters.abcadapter.ABCAdapterForm

fill_trait(datatype)[source]
static get_filters()[source]
static get_input_name()[source]
static get_required_datatype()[source]
get_traited_datatype()[source]
static get_view_model()[source]
class tvb.adapters.analyzers.wavelet_adapter.RangeForm(prefix='')[source]

Bases: tvb.core.neotraits.forms.Form

class tvb.adapters.analyzers.wavelet_adapter.WaveletAdapterModel(**kwargs)[source]

Bases: tvb.core.neotraits.view_model.ViewModel, tvb.analyzers.wavelet.ContinuousWaveletTransform

time_series : tvb.adapters.analyzers.wavelet_adapter.WaveletAdapterModel.time_series = DataTypeGidAttr(field_type=<class ‘uuid.UUID’>, default=None, required=True)
The timeseries to which the wavelet is to be applied.
mother : tvb.analyzers.wavelet.ContinuousWaveletTransform.mother = Attr(field_type=<class ‘str’>, default=’morlet’, required=True)
The mother wavelet function used in the transform. Default is ‘morlet’, possibilities are: ‘morlet’...
sample_period : tvb.analyzers.wavelet.ContinuousWaveletTransform.sample_period = Float(field_type=<class ‘float’>, default=7.8125, required=True)
The sampling period of the computed wavelet spectrum. NOTE: This should be an integral multiple of the sampling period of the source time series, otherwise the actual resulting sample period will be the first correct value below that requested.
frequencies : tvb.analyzers.wavelet.ContinuousWaveletTransform.frequencies = Attr(field_type=<class ‘tvb.basic.neotraits._attr.Range’>, default=Range(lo=0.008, hi=0.06, step=0.002), required=True)
The frequency resolution and range returned. Requested frequencies are converted internally into appropriate scales.
normalisation : tvb.analyzers.wavelet.ContinuousWaveletTransform.normalisation = Attr(field_type=<class ‘str’>, default=’energy’, required=True)
The type of normalisation for the resulting wavelet spectrum. Default is ‘energy’, options are: ‘energy’; ‘gabor’.
q_ratio : tvb.analyzers.wavelet.ContinuousWaveletTransform.q_ratio = Float(field_type=<class ‘float’>, default=5.0, required=True)
NFC. Must be greater than 5. Ratios of the center frequencies to bandwidths.

gid : tvb.basic.neotraits._core.HasTraits.gid = Attr(field_type=<class ‘uuid.UUID’>, default=None, required=True)

time_series

Keep a GID but also link the type of DataType it should point to
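
A closing sketch for the wavelet adapter using the documented defaults; the Range class is the one referenced by the frequencies attribute above, and direct invocation is assumed:

    import uuid

    from tvb.basic.neotraits._attr import Range
    from tvb.adapters.analyzers.wavelet_adapter import (
        ContinuousWaveletTransformAdapter, WaveletAdapterModel)


    def run_wavelet(ts_gid: uuid.UUID):
        """Morlet wavelet spectrum of a TimeSeries identified by its GID."""
        view_model = WaveletAdapterModel()
        view_model.time_series = ts_gid
        view_model.mother = 'morlet'
        view_model.sample_period = 7.8125                              # documented default
        view_model.frequencies = Range(lo=0.008, hi=0.06, step=0.002)  # documented default range
        view_model.normalisation = 'energy'
        view_model.q_ratio = 5.0
        adapter = ContinuousWaveletTransformAdapter()
        adapter.configure(view_model)
        return adapter.launch(view_model)
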