diff --git a/README.rst b/README.rst
index c66e908..d79ef8a 100644
--- a/README.rst
+++ b/README.rst
@@ -70,7 +70,7 @@ Entropy
0.9945519071575192
0.8482185855709181
2.0754913760787277
- 2.1919237573930315
+ 2.192416747827227
Fractal dimension
-----------------
@@ -114,13 +114,13 @@ Here are some benchmarks computed on an average PC (i7-7700HQ CPU @ 2.80 Ghz - 8
%timeit spectral_entropy(x, 100, method='fft')
%timeit svd_entropy(x, order=3, delay=1)
%timeit app_entropy(x, order=2) # Slow
- %timeit sample_entropy(x, order=2) # Slow
+ %timeit sample_entropy(x, order=2) # Numba
# Fractal dimension
%timeit petrosian_fd(x)
%timeit katz_fd(x)
- %timeit higuchi_fd(x) # Numba (fast)
+ %timeit higuchi_fd(x) # Numba
# Other
- %timeit detrended_fluctuation(x) # Numba (fast)
+ %timeit detrended_fluctuation(x) # Numba
.. parsed-literal::
@@ -128,7 +128,7 @@ Here are some benchmarks computed on an average PC (i7-7700HQ CPU @ 2.80 Ghz - 8
150 µs ± 859 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
42.4 µs ± 306 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
4.59 ms ± 62.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
- 4.61 ms ± 163 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+ 2.03 ms ± 39.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
16.4 µs ± 251 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
32.4 µs ± 578 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
17.4 µs ± 274 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
diff --git a/docs/build/doctrees/environment.pickle b/docs/build/doctrees/environment.pickle
index 894b681..d9a35e6 100644
Binary files a/docs/build/doctrees/environment.pickle and b/docs/build/doctrees/environment.pickle differ
diff --git a/docs/build/doctrees/generated/entropy.sample_entropy.doctree b/docs/build/doctrees/generated/entropy.sample_entropy.doctree
index 5dcf3c6..98f3ea7 100644
Binary files a/docs/build/doctrees/generated/entropy.sample_entropy.doctree and b/docs/build/doctrees/generated/entropy.sample_entropy.doctree differ
diff --git a/docs/build/doctrees/index.doctree b/docs/build/doctrees/index.doctree
index bc1d9c9..7020b29 100644
Binary files a/docs/build/doctrees/index.doctree and b/docs/build/doctrees/index.doctree differ
diff --git a/docs/build/html/_modules/entropy/entropy.html b/docs/build/html/_modules/entropy/entropy.html
index e42a144..4525e39 100644
--- a/docs/build/html/_modules/entropy/entropy.html
+++ b/docs/build/html/_modules/entropy/entropy.html
@@ -87,7 +87,8 @@
import numpy as np
-from math import factorial
+from numba import jit
+from math import factorial, log
from sklearn.neighbors import KDTree
from scipy.signal import periodogram, welch
@@ -367,6 +368,61 @@ Source code for entropy.entropy
    return phi
+@jit('f8(f8[:], i4, f8)', nopython=True)
+def _numba_sampen(x, mm=2, r=0.2):
+    """
+    Fast evaluation of the sample entropy using Numba.
+    """
+    n = x.size
+    n1 = n - 1
+    mm += 1
+    mm_dbld = 2 * mm
+
+    # Define threshold
+    r *= x.std()
+
+    # initialize the lists
+    run = [0] * n
+    run1 = run[:]
+    r1 = [0] * (n * mm_dbld)
+    a = [0] * mm
+    b = a[:]
+    p = a[:]
+
+    for i in range(n1):
+        nj = n1 - i
+
+        for jj in range(nj):
+            j = jj + i + 1
+            if abs(x[j] - x[i]) < r:
+                run[jj] = run1[jj] + 1
+                m1 = mm if mm < run[jj] else run[jj]
+                for m in range(m1):
+                    a[m] += 1
+                    if j < n1:
+                        b[m] += 1
+            else:
+                run[jj] = 0
+        for j in range(mm_dbld):
+            run1[j] = run[j]
+            r1[i + n * j] = run[j]
+        if nj > mm_dbld - 1:
+            for j in range(mm_dbld, nj):
+                run1[j] = run[j]
+
+    m = mm - 1
+
+    while m > 0:
+        b[m] = b[m - 1]
+        m -= 1
+
+    b[0] = n * n1 / 2
+    a = np.array([float(aa) for aa in a])
+    b = np.array([float(bb) for bb in b])
+    p = np.true_divide(a, b)
+    return -log(p[-1])
+
+
[docs]def app_entropy(x, order=2, metric='chebyshev'):
"""Approximate Entropy.
@@ -439,8 +495,6 @@ Source code for entropy.entropy
Notes
-----
- Original code from the mne-features package.
-
Sample entropy is a modification of approximate entropy, used for assessing
the complexity of physiological time-series signals. It has two advantages
over approximate entropy: data length independence and a relatively
@@ -458,8 +512,11 @@ Source code for entropy.entropy
:math:`C(m, r)` is the number of embedded vectors of length
:math:`m` having a Chebyshev distance inferior to :math:`r`.
- Code adapted from the mne-features package by Jean-Baptiste Schiratti
- and Alexandre Gramfort.
+ Note that if metric == 'chebyshev' and x.size < 5000 points, then the
+ sample entropy is computed using a fast custom Numba script. For other
+ distance metrics or longer time series, the sample entropy is computed
+ using code adapted from the mne-features package by Jean-Baptiste
+ Schiratti and Alexandre Gramfort (requires sklearn).
References
----------
@@ -487,10 +544,12 @@ Source code for entropy.entropy
>>> print(sample_entropy(x, order=3, metric='euclidean'))
2.725
"""
-    phi = _app_samp_entropy(x, order=order, metric=metric, approximate=False)
-    if np.allclose(phi[0], 0) or np.allclose(phi[1], 0):
-        raise ValueError('Sample Entropy is not defined.')
+    x = np.asarray(x, dtype=np.float64)
+    if metric == 'chebyshev' and x.size < 5000:
+        return _numba_sampen(x, mm=order, r=0.2)
    else:
+        phi = _app_samp_entropy(x, order=order, metric=metric,
+                                approximate=False)
        return -np.log(np.divide(phi[1], phi[0]))
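To make the new dispatch rule concrete, here is a minimal usage sketch (illustrative only, not part of the patch): with the default ``metric='chebyshev'`` and fewer than 5000 samples, ``sample_entropy`` now runs the Numba kernel added above, while longer series or other metrics fall back to the KDTree-based code adapted from mne-features.

.. code-block:: python

    import numpy as np
    from entropy import sample_entropy

    np.random.seed(1234567)
    x_short = np.random.rand(3000)  # fewer than 5000 samples
    x_long = np.random.rand(6000)   # 5000 samples or more

    # Default Chebyshev metric + short series -> fast Numba path
    print(sample_entropy(x_short, order=2))
    # Longer series -> KDTree-based fallback (adapted from mne-features)
    print(sample_entropy(x_long, order=2))
    # Non-Chebyshev metric -> KDTree-based fallback, regardless of length
    print(sample_entropy(x_short, order=2, metric='euclidean'))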
diff --git a/docs/build/html/_sources/index.rst.txt b/docs/build/html/_sources/index.rst.txt
index 906edde..6645e7e 100644
--- a/docs/build/html/_sources/index.rst.txt
+++ b/docs/build/html/_sources/index.rst.txt
@@ -65,7 +65,7 @@ Entropy
0.9945519071575192
0.8482185855709181
2.0754913760787277
- 2.1919237573930315
+ 2.192416747827227
Fractal dimension
-----------------
@@ -123,7 +123,7 @@ Here are some benchmarks computed on an average PC (i7-7700HQ CPU @ 2.80 Ghz - 8
150 µs ± 859 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
42.4 µs ± 306 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
4.59 ms ± 62.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
- 4.61 ms ± 163 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+ 2.03 ms ± 39.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
16.4 µs ± 251 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
32.4 µs ± 578 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
17.4 µs ± 274 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
diff --git a/docs/build/html/generated/entropy.sample_entropy.html b/docs/build/html/generated/entropy.sample_entropy.html
index 9a22f1d..66f7774 100644
--- a/docs/build/html/generated/entropy.sample_entropy.html
+++ b/docs/build/html/generated/entropy.sample_entropy.html
@@ -119,7 +119,6 @@ entropy.sample_entropy
Notes
-Original code from the mne-features package.
Sample entropy is a modification of approximate entropy, used for assessing
the complexity of physiological time-series signals. It has two advantages
over approximate entropy: data length independence and a relatively
@@ -134,8 +133,11 @@ entropy.sample_entropy
\(m + 1\) having a Chebyshev distance inferior to \(r\) and
\(C(m, r)\) is the number of embedded vectors of length
\(m\) having a Chebyshev distance inferior to \(r\).
-Code adapted from the mne-features package by Jean-Baptiste Schiratti
-and Alexandre Gramfort.
+Note that if metric == ‘chebyshev’ and x.size < 5000 points, then the
+sample entropy is computed using a fast custom Numba script. For other
+distance metrics or longer time series, the sample entropy is computed
+using code adapted from the mne-features package by Jean-Baptiste
+Schiratti and Alexandre Gramfort (requires sklearn).
References
diff --git a/docs/build/html/index.html b/docs/build/html/index.html
index a2b8bac..40ce0b7 100644
--- a/docs/build/html/index.html
+++ b/docs/build/html/index.html
@@ -134,7 +134,7 @@ Entropy
0.9945519071575192
0.8482185855709181
2.0754913760787277
-2.1919237573930315
+2.192416747827227
@@ -186,7 +186,7 @@ Execution time
150 µs ± 859 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
42.4 µs ± 306 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
4.59 ms ± 62.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
-4.61 ms ± 163 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+2.03 ms ± 39.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
16.4 µs ± 251 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
32.4 µs ± 578 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
17.4 µs ± 274 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
diff --git a/docs/build/html/searchindex.js b/docs/build/html/searchindex.js
index 7719604..8003073 100644
--- a/docs/build/html/searchindex.js
+++ b/docs/build/html/searchindex.js
@@ -1 +1 @@
-Search.setIndex({docnames:["api","changelog","generated/entropy.app_entropy","generated/entropy.detrended_fluctuation","generated/entropy.higuchi_fd","generated/entropy.katz_fd","generated/entropy.perm_entropy","generated/entropy.petrosian_fd","generated/entropy.sample_entropy","generated/entropy.spectral_entropy","generated/entropy.svd_entropy","index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":1,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.viewcode":1,sphinx:55},filenames:["api.rst","changelog.rst","generated\\entropy.app_entropy.rst","generated\\entropy.detrended_fluctuation.rst","generated\\entropy.higuchi_fd.rst","generated\\entropy.katz_fd.rst","generated\\entropy.perm_entropy.rst","generated\\entropy.petrosian_fd.rst","generated\\entropy.sample_entropy.rst","generated\\entropy.spectral_entropy.rst","generated\\entropy.svd_entropy.rst","index.rst"],objects:{entropy:{app_entropy:[2,0,1,""],detrended_fluctuation:[3,0,1,""],higuchi_fd:[4,0,1,""],katz_fd:[5,0,1,""],perm_entropy:[6,0,1,""],petrosian_fd:[7,0,1,""],sample_entropy:[8,0,1,""],spectral_entropy:[9,0,1,""],svd_entropy:[10,0,1,""]}},objnames:{"0":["py","function","Python function"]},objtypes:{"0":"py:function"},terms:{"2nd":[5,7],"4n_":7,"7700hq":11,"default":[2,3,8,9],"float":[2,3,4,5,6,7,8,9,10],"function":[2,3,4,8],"import":[2,3,4,5,6,7,8,9,10,11],"int":[2,4,6,8,9,10],"long":3,"new":[],"return":[2,3,4,5,6,7,8,9,10],"true":[6,9,10,11],"try":[],For:3,One:[2,3,4,5,6,7,8,9,10],The:[2,3,4,5,6,7,8,10],_log_n:3,adapt:[2,3,8],adequ:10,advantag:8,affin:3,alexandr:[2,4,5,8],algorithm:[5,7,11],all:[6,11],allow:3,alpha:3,alwai:11,american:[2,8],amount:2,analysi:[1,2,3,8,11],ani:11,anoth:[],api:[],app_entropi:11,appear:6,applic:5,approach:4,approxim:[1,2,8,11],arang:9,arrai:[2,3,4,5,6,7,8,9,10],assess:8,attain:6,author:11,avail:[2,8],averag:11,bandt:6,baptist:[2,4,5,8],base:7,basi:4,behav:3,behind:3,benchmark:11,bernd:6,between:[5,6,9,10],binari:7,biomark:[5,7],bit:[6,9,10],bool:[6,9,10],borrow:11,bound:6,brownian:3,bug:11,buldyrev:3,c_q:[],calcul:3,call:3,can:[7,11],chang:[3,7],character:8,chebyshev:[2,8,11],check:11,christoph:[3,6],cimed2005:[5,7],cindi:[5,7],circuit:5,circulatori:[2,8],clear:6,clinic:9,clone:11,code:[2,3,4,5,7,8,11],com:11,compar:6,comparison:[5,7],complet:6,complex:[6,7,8,11],compris:[6,10],comput:[4,5,7,11],confer:[5,7],consecut:6,contact:11,contain:6,contribut:11,cpu:11,creat:[6,10,11],credit:11,cschoel:[3,11],current:3,data:[2,8,9,10],decomposit:[1,10,11],decreas:6,defin:[5,6,7,8,9,10],definit:3,delai:[4,6,10,11],delta:7,dementia:[5,7],densiti:9,depend:[3,11],detail:3,detrend:[1,3,11],detrended_fluctu:11,dev:11,deviat:3,dfa:3,dfrac:[5,7,8],differ:7,dimens:[1,2,4,5,7,8],dimension:[2,3,4,5,6,7,8,9,10,11],discret:9,distanc:[5,8],divid:[6,9,10],dna:3,document:3,doe:3,doubl:11,dur:9,durat:9,each:[9,11],eeg:[5,7,9,11],effici:11,eigenvector:10,eighth:7,either:[],electroencephalographi:9,embed:[2,6,8,10],entropi:1,equat:3,estel:5,estim:[3,9],euclidean:[5,8],exampl:[3,4,5,6,7,8,9,10,11],excel:[3,11],execut:[],explan:10,expon:3,extract:11,f_s:9,factor:3,factori:[6,10],fals:[6,9,10],fast:[7,11],faster:3,fd_:5,featur:[2,4,5,8,11],feel:11,fft:[9,11],find:3,finit:7,first:[5,6],fluctuat:[1,2,3,11],fourier:9,fractal:[1,4,5,7],fraction:3,free:[3,8,11],frequenc:9,from:[2,3,4,5,6,7,8,9,10,11],frontier:3,fundament:5,furthest:5,gaussian:3,geissmann:7,ghz:11,gilestrolab:11,git:11,github:[3,11],given:[2,8],
goal:[],goe:11,goh:[5,7],goldberg:3,gramfort:[2,4,5,8],h2039:[2,8],h2049:[2,8],h_q:[],hansen:3,hardston:3,has:8,have:8,havlin:3,healthcar:[5,7],heart:[2,8],here:11,hfd:4,high:8,higuchi:[1,4,11],higuchi_fd:11,html:3,http:[3,11],hurst:3,idea:3,ieee:[5,7],implement:[3,8],improv:[],includ:[],increas:6,inde:3,independ:8,indic:[2,8,10],inferior:8,inform:6,initi:1,inouy:9,intellig:[5,7],intern:[5,7],introduc:6,irregular:[4,9],issu:11,jansen:3,jean:[2,4,5,8],journal:[2,8],katz:[1,5,11],katz_fd:11,kdtree:[2,8],kfd:5,kind:11,kmax:[4,11],kolmogorov:7,larg:8,law:3,learn:11,len:3,length:[3,5,7,8,9],less:[],letter:6,like:[],linkenka:3,list:[2,3,4,5,6,7,8,9,10],log2:[6,9,10],log:8,log_2:[6,9,10],log_:[5,7],loop:11,lower:6,main:[],maintain:11,mansveld:3,manual:3,matrix:[6,10],maximum:4,mean:11,measur:[6,10],medic:7,medicin:[5,7],method:[9,11],metric:[2,8,11],mne:[2,4,5,8,11],mode:11,modif:8,more:[2,3,8,11],mosaic:3,motion:3,much:[],n_time:[2,6,8,9,10],name:[2,8],natur:6,need:10,neighbourhood:8,neuron:3,neurophysiolog:9,nikdon:11,nikulin:3,nois:3,nold:[3,11],non:3,none:9,nonlinear:4,normal:[6,9,10,11],normalis:9,normliz:[],note:[2,3,4,5,6,7,8,9,10,11],nperseg:9,nucleotid:3,numba:[3,4,11],number:[4,7,8,9,10],numpi:[2,3,4,5,7,8,9,11],offset:4,one:11,open:11,optim:[],order:[2,6,8,10,11],organ:3,origin:[2,3,4,5,7,8],oscil:3,other:10,otherwis:[6,9,10],over:[2,3,6,8],overlap:3,overlin:10,packag:[2,3,4,5,7,8,11],paramet:[2,3,4,5,6,7,8,9,10],pattern:7,peng:3,per:11,periodogram:9,perm_entropi:11,permut:[1,6,10,11],petrosian:[1,7,11],petrosian_fd:11,pfd:7,phenomena:4,physic:[3,6],physica:4,physiolog:[2,3,8],pip:11,pleas:[3,11],poil:3,point:5,pomp:6,posit:[],possibl:6,power:[3,9],predict:2,preictal:7,print:[2,3,4,5,6,7,8,9,10,11],probabl:6,proceed:7,process:3,program:11,provid:[5,7,11],psd:9,pull:11,pure:9,pyentropi:11,pyrem:[7,11],python:[3,11],quantif:9,quantifi:2,quentin:7,rac:[],radiu:8,ram:11,rand:[2,3,4,5,7,8,9,11],random:[2,3,4,5,6,7,8,9,11],raphael:11,raphaelvallat:11,recognit:7,refer:[2,3,4,5,6,7,8,9],regular:[2,8],rel:8,releas:1,report:11,repositori:11,request:11,requir:11,respect:5,result:11,review:[3,6],richman:[2,8],round:9,run:[6,11],said:3,same:6,sampl:[1,2,4,8,9,11],sample_entropi:11,scale:3,schiavon:3,schiratti:[2,4,5,8],scholzel:3,scikit:11,scipi:[9,11],see:11,seed:[2,3,4,5,7,8,9,11],segment:9,self:[3,8],sequenc:[6,7],seri:[2,3,4,5,6,7,8,9,10,11],set:[2,3,10],setup:11,sever:11,shannon:9,shape:[2,6,8,9,10],sigma:10,sigma_1:10,sigma_2:10,sigma_m:10,sign:7,signal:[6,7,8,9,10,11],similar:[3,8],simon:3,sin:9,sine:9,singular:[1,10,11],size:[3,9],slow:11,smaller:[2,8],softwar:[],some:11,sourc:[2,3,4,5,6,7,8,9,10],specifi:3,spectral:[1,9,11],spectral_entropi:11,spectrum:9,speed:4,standard:3,stanlei:3,stationari:3,statist:3,std:[2,3,8,11],str:[2,8,9],submit:11,subseri:3,sum:6,sum_:[9,10],svd:10,svd_e:10,svd_entropi:11,symposium:7,system:[5,6,7],taken:3,techniqu:2,term:3,than:11,theori:[4,5],therefor:[],thi:[3,6,11],time:[2,3,4,5,6,7,8,9,10],timeit:11,tomoyuki:4,tool:11,total:[5,9],transact:5,transform:9,translat:7,troubl:8,two:8,txt:11,unpredict:2,upper:6,use:[3,9],used:[2,3,7,8,11],uses:[4,9],using:[2,8,9],util:3,valid_metr:[2,8],vallat:11,valu:[1,2,3,6,8,10,11],vector:[8,9],veri:3,via:9,view:3,visit:11,vol:3,warranti:11,waveform:5,welch:[9,11],welcom:11,were:11,what:[],where:[3,5,6,7,8,9,10],wherea:8,which:3,window:3,within:3,word:10,x_i:[6,10],you:11,zero:[]},titles:["API reference","What\u2019s 
new","entropy.app_entropy","entropy.detrended_fluctuation","entropy.higuchi_fd","entropy.katz_fd","entropy.perm_entropy","entropy.petrosian_fd","entropy.sample_entropy","entropy.spectral_entropy","entropy.svd_entropy","Installation"],titleterms:{"function":11,"new":1,acknowledg:11,api:0,app_entropi:2,content:[],detrended_fluctu:3,develop:11,dimens:[0,11],entropi:[0,2,3,4,5,6,7,8,9,10,11],execut:11,fractal:[0,11],higuchi_fd:4,instal:11,katz_fd:5,measur:11,minut:[],octob:1,other:[0,11],perm_entropi:6,permut:[],petrosian_fd:7,quickstart:[],refer:0,sample_entropi:8,spectral_entropi:9,svd_entropi:10,time:11,what:1}})
\ No newline at end of file
+Search.setIndex({docnames:["api","changelog","generated/entropy.app_entropy","generated/entropy.detrended_fluctuation","generated/entropy.higuchi_fd","generated/entropy.katz_fd","generated/entropy.perm_entropy","generated/entropy.petrosian_fd","generated/entropy.sample_entropy","generated/entropy.spectral_entropy","generated/entropy.svd_entropy","index"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":1,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,"sphinx.ext.viewcode":1,sphinx:55},filenames:["api.rst","changelog.rst","generated\\entropy.app_entropy.rst","generated\\entropy.detrended_fluctuation.rst","generated\\entropy.higuchi_fd.rst","generated\\entropy.katz_fd.rst","generated\\entropy.perm_entropy.rst","generated\\entropy.petrosian_fd.rst","generated\\entropy.sample_entropy.rst","generated\\entropy.spectral_entropy.rst","generated\\entropy.svd_entropy.rst","index.rst"],objects:{entropy:{app_entropy:[2,0,1,""],detrended_fluctuation:[3,0,1,""],higuchi_fd:[4,0,1,""],katz_fd:[5,0,1,""],perm_entropy:[6,0,1,""],petrosian_fd:[7,0,1,""],sample_entropy:[8,0,1,""],spectral_entropy:[9,0,1,""],svd_entropy:[10,0,1,""]}},objnames:{"0":["py","function","Python function"]},objtypes:{"0":"py:function"},terms:{"2nd":[5,7],"4n_":7,"7700hq":11,"default":[2,3,8,9],"float":[2,3,4,5,6,7,8,9,10],"function":[2,3,4,8],"import":[2,3,4,5,6,7,8,9,10,11],"int":[2,4,6,8,9,10],"long":3,"new":[],"return":[2,3,4,5,6,7,8,9,10],"true":[6,9,10,11],"try":[],For:[3,8],One:[2,3,4,5,6,7,8,9,10],The:[2,3,4,5,6,7,8,10],_log_n:3,adapt:[2,3],adequ:10,advantag:8,affin:3,alexandr:[2,4,5,8],algorithm:[5,7,11],all:[6,11],allow:3,alpha:3,alwai:11,american:[2,8],amount:2,analysi:[1,2,3,8,11],ani:11,anoth:[],api:[],app_entropi:11,appear:6,applic:5,approach:4,approxim:[1,2,8,11],arang:9,arrai:[2,3,4,5,6,7,8,9,10],assess:8,attain:6,author:11,avail:[2,8],averag:11,bandt:6,baptist:[2,4,5,8],base:7,basi:4,behav:3,behind:3,benchmark:11,bernd:6,between:[5,6,9,10],binari:7,biomark:[5,7],bit:[6,9,10],bool:[6,9,10],borrow:11,bound:6,brownian:3,bug:11,buldyrev:3,c_q:[],calcul:3,call:3,can:[7,11],chang:[3,7],character:8,chebyshev:[2,8,11],check:11,christoph:[3,6],cimed2005:[5,7],cindi:[5,7],circuit:5,circulatori:[2,8],clear:6,clinic:9,clone:11,code:[2,3,4,5,7,8,11],com:11,compar:6,comparison:[5,7],complet:6,complex:[6,7,8,11],compris:[6,10],comput:[4,5,7,8,11],confer:[5,7],consecut:6,contact:11,contain:6,contribut:11,cpu:11,creat:[6,10,11],credit:11,cschoel:[3,11],current:3,custom:8,data:[2,8,9,10],decomposit:[1,10,11],decreas:6,defin:[5,6,7,8,9,10],definit:3,delai:[4,6,10,11],delta:7,dementia:[5,7],densiti:9,depend:[3,11],detail:3,detrend:[1,3,11],detrended_fluctu:11,dev:11,deviat:3,dfa:3,dfrac:[5,7,8],differ:7,dimens:[1,2,4,5,7,8],dimension:[2,3,4,5,6,7,8,9,10,11],discret:9,distanc:[5,8],divid:[6,9,10],dna:3,document:3,doe:3,doubl:11,dur:9,durat:9,each:[9,11],eeg:[5,7,9,11],effici:11,eigenvector:10,eighth:7,either:[],electroencephalographi:9,embed:[2,6,8,10],entropi:1,equat:3,estel:5,estim:[3,9],euclidean:[5,8],exampl:[3,4,5,6,7,8,9,10,11],excel:[3,11],execut:[],explan:10,expon:3,extract:11,f_s:9,factor:3,factori:[6,10],fals:[6,9,10],fast:[7,8,11],faster:3,fd_:5,featur:[2,4,5,8,11],feel:11,fft:[9,11],find:3,finit:7,first:[5,6],fluctuat:[1,2,3,11],fourier:9,fractal:[1,4,5,7],fraction:3,free:[3,8,11],frequenc:9,from:[2,3,4,5,6,7,8,9,10,11],frontier:3,fundament:5,furthest:5,gaussian:3,geissmann:7,ghz:11,gilestrolab:11,git:11,github:[3,1
1],given:[2,8],goal:[],goe:11,goh:[5,7],goldberg:3,gramfort:[2,4,5,8],h2039:[2,8],h2049:[2,8],h_q:[],hansen:3,hardston:3,has:8,have:8,havlin:3,healthcar:[5,7],heart:[2,8],here:11,hfd:4,high:8,higuchi:[1,4,11],higuchi_fd:11,html:3,http:[3,11],hurst:3,idea:3,ieee:[5,7],implement:[3,8],improv:[],includ:[],increas:6,inde:3,independ:8,indic:[2,8,10],inferior:8,inform:6,initi:1,inouy:9,intellig:[5,7],intern:[5,7],introduc:6,irregular:[4,9],issu:11,jansen:3,jean:[2,4,5,8],journal:[2,8],katz:[1,5,11],katz_fd:11,kdtree:[2,8],kfd:5,kind:11,kmax:[4,11],kolmogorov:7,larg:8,law:3,learn:11,len:3,length:[3,5,7,8,9],less:[],letter:6,like:[],linkenka:3,list:[2,3,4,5,6,7,8,9,10],log2:[6,9,10],log:8,log_2:[6,9,10],log_:[5,7],longer:8,loop:11,lower:6,main:[],maintain:11,mansveld:3,manual:3,matrix:[6,10],maximum:4,mean:11,measur:[6,10],medic:7,medicin:[5,7],method:[9,11],metric:[2,8,11],mne:[2,4,5,8,11],mode:11,modif:8,more:[2,3,8,11],mosaic:3,motion:3,much:[],n_time:[2,6,8,9,10],name:[2,8],natur:6,need:10,neighbourhood:8,neuron:3,neurophysiolog:9,nikdon:11,nikulin:3,nois:3,nold:[3,11],non:3,none:9,nonlinear:4,normal:[6,9,10,11],normalis:9,normliz:[],note:[2,3,4,5,6,7,8,9,10,11],nperseg:9,nucleotid:3,numba:[3,4,8,11],number:[4,7,8,9,10],numpi:[2,3,4,5,7,8,9,11],offset:4,one:11,open:11,optim:[],order:[2,6,8,10,11],organ:3,origin:[2,3,4,5,7],oscil:3,other:[8,10],otherwis:[6,9,10],over:[2,3,6,8],overlap:3,overlin:10,packag:[2,3,4,5,7,8,11],paramet:[2,3,4,5,6,7,8,9,10],pattern:7,peng:3,per:11,periodogram:9,perm_entropi:11,permut:[1,6,10,11],petrosian:[1,7,11],petrosian_fd:11,pfd:7,phenomena:4,physic:[3,6],physica:4,physiolog:[2,3,8],pip:11,pleas:[3,11],poil:3,point:[5,8],pomp:6,posit:[],possibl:6,power:[3,9],predict:2,preictal:7,print:[2,3,4,5,6,7,8,9,10,11],probabl:6,proceed:7,process:3,program:11,provid:[5,7,11],psd:9,pull:11,pure:9,pyentropi:11,pyrem:[7,11],python:[3,11],quantif:9,quantifi:2,quentin:7,rac:[],radiu:8,ram:11,rand:[2,3,4,5,7,8,9,11],random:[2,3,4,5,6,7,8,9,11],raphael:11,raphaelvallat:11,recognit:7,refer:[2,3,4,5,6,7,8,9],regular:[2,8],rel:8,releas:1,report:11,repositori:11,request:11,requir:[8,11],respect:5,result:11,review:[3,6],richman:[2,8],round:9,run:[6,11],said:3,same:6,sampl:[1,2,4,8,9,11],sample_entropi:11,scale:3,schiavon:3,schiratti:[2,4,5,8],scholzel:3,scikit:11,scipi:[9,11],script:8,see:11,seed:[2,3,4,5,7,8,9,11],segment:9,self:[3,8],sequenc:[6,7],seri:[2,3,4,5,6,7,8,9,10,11],set:[2,3,10],setup:11,sever:11,shannon:9,shape:[2,6,8,9,10],sigma:10,sigma_1:10,sigma_2:10,sigma_m:10,sign:7,signal:[6,7,8,9,10,11],similar:[3,8],simon:3,sin:9,sine:9,singular:[1,10,11],size:[3,8,9],sklearn:8,slow:11,smaller:[2,8],softwar:[],some:11,sourc:[2,3,4,5,6,7,8,9,10],specifi:3,spectral:[1,9,11],spectral_entropi:11,spectrum:9,speed:4,standard:3,stanlei:3,stationari:3,statist:3,std:[2,3,8,11],str:[2,8,9],submit:11,subseri:3,sum:6,sum_:[9,10],svd:10,svd_e:10,svd_entropi:11,symposium:7,system:[5,6,7],taken:3,techniqu:2,term:3,than:11,theori:[4,5],therefor:[],thi:[3,6,11],time:[2,3,4,5,6,7,8,9,10],timeit:11,tomoyuki:4,tool:11,total:[5,9],transact:5,transform:9,translat:7,troubl:8,two:8,txt:11,type:8,unpredict:2,upper:6,use:[3,9],used:[2,3,7,8,11],uses:[4,9],using:[2,8,9],util:3,valid_metr:[2,8],vallat:11,valu:[1,2,3,6,8,10,11],vector:[8,9],veri:3,via:9,view:3,visit:11,vol:3,warranti:11,waveform:5,welch:[9,11],welcom:11,were:11,what:[],where:[3,5,6,7,8,9,10],wherea:8,which:3,window:3,within:3,word:10,x_i:[6,10],you:11,zero:[]},titles:["API reference","What\u2019s 
new","entropy.app_entropy","entropy.detrended_fluctuation","entropy.higuchi_fd","entropy.katz_fd","entropy.perm_entropy","entropy.petrosian_fd","entropy.sample_entropy","entropy.spectral_entropy","entropy.svd_entropy","Installation"],titleterms:{"function":11,"new":1,acknowledg:11,api:0,app_entropi:2,content:[],detrended_fluctu:3,develop:11,dimens:[0,11],entropi:[0,2,3,4,5,6,7,8,9,10,11],execut:11,fractal:[0,11],higuchi_fd:4,instal:11,katz_fd:5,measur:11,minut:[],octob:1,other:[0,11],perm_entropi:6,permut:[],petrosian_fd:7,quickstart:[],refer:0,sample_entropi:8,spectral_entropi:9,svd_entropi:10,time:11,what:1}})
\ No newline at end of file
diff --git a/docs/index.rst b/docs/index.rst
index 906edde..6645e7e 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -65,7 +65,7 @@ Entropy
0.9945519071575192
0.8482185855709181
2.0754913760787277
- 2.1919237573930315
+ 2.192416747827227
Fractal dimension
-----------------
@@ -123,7 +123,7 @@ Here are some benchmarks computed on an average PC (i7-7700HQ CPU @ 2.80 Ghz - 8
150 µs ± 859 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
42.4 µs ± 306 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
4.59 ms ± 62.2 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
- 4.61 ms ± 163 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)
+ 2.03 ms ± 39.5 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
16.4 µs ± 251 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
32.4 µs ± 578 ns per loop (mean ± std. dev. of 7 runs, 10000 loops each)
17.4 µs ± 274 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
diff --git a/entropy/entropy.py b/entropy/entropy.py
index 47a88fb..5e63583 100644
--- a/entropy/entropy.py
+++ b/entropy/entropy.py
@@ -1,5 +1,6 @@
import numpy as np
-from math import factorial
+from numba import jit
+from math import factorial, log
from sklearn.neighbors import KDTree
from scipy.signal import periodogram, welch
@@ -279,6 +280,61 @@ def _app_samp_entropy(x, order, metric='chebyshev', approximate=True):
    return phi
+@jit('f8(f8[:], i4, f8)', nopython=True)
+def _numba_sampen(x, mm=2, r=0.2):
+    """
+    Fast evaluation of the sample entropy using Numba.
+    """
+    n = x.size
+    n1 = n - 1
+    mm += 1
+    mm_dbld = 2 * mm
+
+    # Define threshold
+    r *= x.std()
+
+    # initialize the lists
+    run = [0] * n
+    run1 = run[:]
+    r1 = [0] * (n * mm_dbld)
+    a = [0] * mm
+    b = a[:]
+    p = a[:]
+
+    for i in range(n1):
+        nj = n1 - i
+
+        for jj in range(nj):
+            j = jj + i + 1
+            if abs(x[j] - x[i]) < r:
+                run[jj] = run1[jj] + 1
+                m1 = mm if mm < run[jj] else run[jj]
+                for m in range(m1):
+                    a[m] += 1
+                    if j < n1:
+                        b[m] += 1
+            else:
+                run[jj] = 0
+        for j in range(mm_dbld):
+            run1[j] = run[j]
+            r1[i + n * j] = run[j]
+        if nj > mm_dbld - 1:
+            for j in range(mm_dbld, nj):
+                run1[j] = run[j]
+
+    m = mm - 1
+
+    while m > 0:
+        b[m] = b[m - 1]
+        m -= 1
+
+    b[0] = n * n1 / 2
+    a = np.array([float(aa) for aa in a])
+    b = np.array([float(bb) for bb in b])
+    p = np.true_divide(a, b)
+    return -log(p[-1])
+
+
def app_entropy(x, order=2, metric='chebyshev'):
"""Approximate Entropy.
@@ -351,8 +407,6 @@ def sample_entropy(x, order=2, metric='chebyshev'):
Notes
-----
- Original code from the mne-features package.
-
Sample entropy is a modification of approximate entropy, used for assessing
the complexity of physiological time-series signals. It has two advantages
over approximate entropy: data length independence and a relatively
@@ -370,8 +424,11 @@ def sample_entropy(x, order=2, metric='chebyshev'):
:math:`C(m, r)` is the number of embedded vectors of length
:math:`m` having a Chebyshev distance inferior to :math:`r`.
- Code adapted from the mne-features package by Jean-Baptiste Schiratti
- and Alexandre Gramfort.
+ Note that if metric == 'chebyshev' and x.size < 5000 points, then the
+ sample entropy is computed using a fast custom Numba script. For other
+ distance metrics or longer time series, the sample entropy is computed
+ using code adapted from the mne-features package by Jean-Baptiste
+ Schiratti and Alexandre Gramfort (requires sklearn).
References
----------
@@ -399,8 +456,10 @@ def sample_entropy(x, order=2, metric='chebyshev'):
>>> print(sample_entropy(x, order=3, metric='euclidean'))
2.725
"""
-    phi = _app_samp_entropy(x, order=order, metric=metric, approximate=False)
-    if np.allclose(phi[0], 0) or np.allclose(phi[1], 0):
-        raise ValueError('Sample Entropy is not defined.')
+    x = np.asarray(x, dtype=np.float64)
+    if metric == 'chebyshev' and x.size < 5000:
+        return _numba_sampen(x, mm=order, r=0.2)
    else:
+        phi = _app_samp_entropy(x, order=order, metric=metric,
+                                approximate=False)
        return -np.log(np.divide(phi[1], phi[0]))
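Because the Numba kernel above is heavily optimized and hard to read, the quantity it computes, -log(A / B) with a Chebyshev tolerance of ``r = 0.2 * std(x)``, can be cross-checked against a naive brute-force reference. The sketch below is illustrative only (not part of the patch), and its edge-template counting convention may differ very slightly from the optimized implementations.

.. code-block:: python

    import numpy as np

    def sampen_bruteforce(x, order=2, r_factor=0.2):
        """Naive O(n**2) sample entropy with the Chebyshev (max-norm) distance."""
        x = np.asarray(x, dtype=np.float64)
        r = r_factor * x.std()
        n = x.size

        def n_matches(length):
            # Embedding vectors x[i:i + length]; the same number of templates
            # (n - order) is used for both lengths, as in Richman & Moorman.
            templates = np.array([x[i:i + length] for i in range(n - order)])
            count = 0
            for i in range(len(templates) - 1):
                # Chebyshev distance from template i to every later template
                d = np.max(np.abs(templates[i + 1:] - templates[i]), axis=1)
                count += np.count_nonzero(d < r)
            return count

        a = n_matches(order + 1)  # pairs of length m + 1 within tolerance
        b = n_matches(order)      # pairs of length m within tolerance
        return -np.log(a / b)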
diff --git a/entropy/tests/test_entropy.py b/entropy/tests/test_entropy.py
index 595ef26..a3529b6 100644
--- a/entropy/tests/test_entropy.py
+++ b/entropy/tests/test_entropy.py
@@ -6,6 +6,7 @@
np.random.seed(1234567)
RANDOM_TS = np.random.rand(3000)
+RANDOM_TS_LONG = np.random.rand(6000)
SF_TS = 100
BANDT_PERM = [4, 7, 9, 10, 6, 11, 3]
PURE_SINE = np.sin(2 * np.pi * 1 * np.arange(3000) / 100)
@@ -44,6 +45,7 @@ def test_svd_entropy(self):
    def test_sample_entropy(self):
        se = sample_entropy(RANDOM_TS, order=2)
+        sample_entropy(RANDOM_TS_LONG, order=2)
        se_eu_3 = sample_entropy(RANDOM_TS, order=3, metric='euclidean')
        # Compare with MNE-features
        self.assertEqual(np.round(se, 3), 2.192)
@@ -52,8 +54,6 @@ def test_sample_entropy(self):
        sample_entropy(RANDOM_TS, order=2, metric='euclidean')
        with self.assertRaises(ValueError):
            sample_entropy(RANDOM_TS, order=2, metric='wrong')
-        with self.assertRaises(ValueError):
-            sample_entropy(BANDT_PERM, order=2)

    def test_app_entropy(self):
        ae = app_entropy(RANDOM_TS, order=2)
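The added test exercises the >= 5000-sample fallback but does not compare the two back-ends. A possible extra assertion (hypothetical, not part of this patch) would check that the Numba kernel and the KDTree-based fallback agree on the same data to a loose tolerance; the README value shown in this diff only shifts from 2.1919... to 2.1924... after the change, so a tolerance of 1e-2 should be comfortable.

.. code-block:: python

    import numpy as np
    from entropy.entropy import _numba_sampen, _app_samp_entropy

    np.random.seed(1234567)
    x = np.random.rand(3000)

    # Path taken for metric='chebyshev' and x.size < 5000
    se_numba = _numba_sampen(x, mm=2, r=0.2)

    # Fallback path (KDTree-based, adapted from mne-features)
    phi = _app_samp_entropy(x, order=2, metric='chebyshev', approximate=False)
    se_kdtree = -np.log(np.divide(phi[1], phi[0]))

    assert np.isclose(se_numba, se_kdtree, atol=1e-2)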