
Commit 71a3c9b

include note on memory usage
1 parent fd9fecf commit 71a3c9b

1 file changed: +68 additions, -111 deletions

Examples_all_decoders.ipynb

Lines changed: 68 additions & 111 deletions
@@ -14,7 +14,9 @@
 "5. Plot example decoded outputs\n",
 "\n",
 "See \"Examples_kf_decoder\" for a Kalman filter example. <br>\n",
-"Because the Kalman filter utilizes different preprocessing, we don't include an example here. to keep this notebook more understandable"
+"Because the Kalman filter utilizes different preprocessing, we don't include an example here. to keep this notebook more understandable\n",
+"\n",
+"We also include a note on memory usage for the neural net decoders at the end of #4"
 ]
 },
 {
@@ -29,23 +31,14 @@
 {
 "cell_type": "code",
 "execution_count": 1,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
-{
-"name": "stdout",
-"output_type": "stream",
-"text": [
-"\n",
-"WARNING: statsmodels is not installed. You will be unable to use the Naive Bayes Decoder\n"
-]
-},
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"Using Theano backend.\n"
+"Using Theano backend.\n",
+"WARNING (theano.configdefaults): install mkl with `conda install mkl-service`: No module named 'mkl'\n"
 ]
 }
 ],
@@ -99,14 +92,12 @@
 {
 "cell_type": "code",
 "execution_count": 2,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
-"folder='' #ENTER THE FOLDER THAT YOUR DATA IS IN\n",
+"# folder='' #ENTER THE FOLDER THAT YOUR DATA IS IN\n",
 "# folder='/home/jglaser/Data/DecData/' \n",
-"# folder='/Users/jig289/Dropbox/Public/Decoding_Data/'\n",
+"folder='/Users/jig289/Dropbox/Public/Decoding_Data/'\n",
 "\n",
 "with open(folder+'example_data_s1.pickle','rb') as f:\n",
 " neural_data,vels_binned=pickle.load(f,encoding='latin1') #If using python 3\n",
@@ -136,9 +127,7 @@
 {
 "cell_type": "code",
 "execution_count": 3,
-"metadata": {
-"collapsed": true
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "bins_before=6 #How many bins of neural data prior to the output are used for decoding\n",
@@ -163,9 +152,7 @@
 {
 "cell_type": "code",
 "execution_count": 4,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "# Format for recurrent neural networks (SimpleRNN, GRU, LSTM)\n",
@@ -187,9 +174,7 @@
 {
 "cell_type": "code",
 "execution_count": 5,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "#Set decoding output\n",
@@ -215,9 +200,7 @@
 {
 "cell_type": "code",
 "execution_count": 6,
-"metadata": {
-"collapsed": true
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "#Set what part of data should be part of the training/testing/validation sets\n",
@@ -236,9 +219,7 @@
 {
 "cell_type": "code",
 "execution_count": 7,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "num_examples=X.shape[0]\n",
@@ -277,9 +258,7 @@
 {
 "cell_type": "code",
 "execution_count": 8,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [],
 "source": [
 "#Z-score \"X\" inputs. \n",
@@ -322,7 +301,6 @@
 "cell_type": "code",
 "execution_count": 9,
 "metadata": {
-"collapsed": false,
 "scrolled": true
 },
 "outputs": [
@@ -359,15 +337,13 @@
 {
 "cell_type": "code",
 "execution_count": 10,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"('R2s:', array([ 0.73127717, 0.73370796]))\n"
+"R2s: [0.73127717 0.73370796]\n"
 ]
 }
 ],
@@ -395,16 +371,22 @@
 },
 {
 "cell_type": "code",
-"execution_count": 12,
-"metadata": {
-"collapsed": false
-},
+"execution_count": 11,
+"metadata": {},
 "outputs": [
+{
+"name": "stderr",
+"output_type": "stream",
+"text": [
+"/Users/jig289/anaconda/envs/decode/lib/python3.5/site-packages/xgboost/core.py:614: UserWarning: Use subset (sliced data) of np.ndarray is not recommended because it will generate extra copies and increase memory consumption\n",
+" \"because it will generate extra copies and increase memory consumption\")\n"
+]
+},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"('R2s:', array([ 0.75403802, 0.76625732]))\n"
+"R2s: [0.75403802 0.76625732]\n"
 ]
 }
 ],
@@ -432,24 +414,24 @@
 },
 {
 "cell_type": "code",
-"execution_count": 13,
-"metadata": {
-"collapsed": false
-},
+"execution_count": 12,
+"metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"/opt/anaconda/anaconda2/lib/python2.7/site-packages/sklearn/svm/base.py:220: ConvergenceWarning: Solver terminated early (max_iter=4000). Consider pre-processing your data with StandardScaler or MinMaxScaler.\n",
+"/Users/jig289/anaconda/envs/decode/lib/python3.5/site-packages/sklearn/svm/base.py:244: ConvergenceWarning: Solver terminated early (max_iter=4000). Consider pre-processing your data with StandardScaler or MinMaxScaler.\n",
+" % self.max_iter, ConvergenceWarning)\n",
+"/Users/jig289/anaconda/envs/decode/lib/python3.5/site-packages/sklearn/svm/base.py:244: ConvergenceWarning: Solver terminated early (max_iter=4000). Consider pre-processing your data with StandardScaler or MinMaxScaler.\n",
 " % self.max_iter, ConvergenceWarning)\n"
 ]
 },
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"('R2s:', array([ 0.81684722, 0.82538223]))\n"
+"R2s: [0.81684722 0.82538223]\n"
 ]
 }
 ],
@@ -484,24 +466,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 14,
-"metadata": {
-"collapsed": false
-},
+"execution_count": 13,
+"metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"/opt/anaconda/anaconda2/lib/python2.7/site-packages/keras/models.py:826: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.\n",
-" warnings.warn('The `nb_epoch` argument in `fit` '\n"
-]
-},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"('R2s:', array([ 0.82674219, 0.84643551]))\n"
+"R2s: [0.82578506 0.84818598]\n"
 ]
 }
 ],
@@ -529,24 +501,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 15,
-"metadata": {
-"collapsed": false
-},
+"execution_count": 14,
+"metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"decoders.py:433: UserWarning: Update your `SimpleRNN` call to the Keras 2 API: `SimpleRNN(400, recurrent_dropout=0, dropout=0, input_shape=(13, 52), activation=\"relu\")`\n",
-" model.add(SimpleRNN(self.units,input_shape=(X_train.shape[1],X_train.shape[2]),dropout_W=self.dropout,dropout_U=self.dropout,activation='relu')) #Within recurrent layer, include dropout\n"
-]
-},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"('R2s:', array([ 0.82154092, 0.82119071]))\n"
+"R2s: [0.82405748 0.80346205]\n"
 ]
 }
 ],
@@ -574,24 +536,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 16,
-"metadata": {
-"collapsed": false
-},
+"execution_count": 15,
+"metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"decoders.py:512: UserWarning: Update your `GRU` call to the Keras 2 API: `GRU(400, dropout=0, recurrent_dropout=0, input_shape=(13, 52))`\n",
-" model.add(GRU(self.units,input_shape=(X_train.shape[1],X_train.shape[2]),dropout_W=self.dropout,dropout_U=self.dropout)) #Within recurrent layer, include dropout\n"
-]
-},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"('R2s:', array([ 0.84200426, 0.83707654]))\n"
+"R2s: [0.83770423 0.83575681]\n"
 ]
 }
 ],
@@ -619,24 +571,14 @@
 },
 {
 "cell_type": "code",
-"execution_count": 17,
-"metadata": {
-"collapsed": false
-},
+"execution_count": 16,
+"metadata": {},
 "outputs": [
-{
-"name": "stderr",
-"output_type": "stream",
-"text": [
-"decoders.py:591: UserWarning: Update your `LSTM` call to the Keras 2 API: `LSTM(400, dropout=0, recurrent_dropout=0, input_shape=(13, 52))`\n",
-" model.add(LSTM(self.units,input_shape=(X_train.shape[1],X_train.shape[2]),dropout_W=self.dropout,dropout_U=self.dropout)) #Within recurrent layer, include dropout\n"
-]
-},
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
-"('R2s:', array([ 0.85177274, 0.85034613]))\n"
+"R2s: [0.84809856 0.84108359]\n"
 ]
 }
 ],
@@ -655,6 +597,23 @@
 "print('R2s:', R2s_lstm)"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### 4 - Side note on memory usage in TensorFlow\n",
+"When using the tensorflow backend for Keras (which is standard in newer versions), there can be issues with memory leakage, particularly when fitting many models. To avoid this problem, models can be deleted with the following code:\n",
+"\n",
+"```\n",
+"import gc\n",
+"from keras import backend as K\n",
+"\n",
+"del model_lstm\n",
+"K.clear_session()\n",
+"gc.collect()\n",
+"```"
+]
+},
 {
 "cell_type": "markdown",
 "metadata": {},
@@ -665,9 +624,7 @@
 {
 "cell_type": "code",
 "execution_count": 18,
-"metadata": {
-"collapsed": false
-},
+"metadata": {},
 "outputs": [
 {
 "data": {
@@ -714,9 +671,9 @@
 "metadata": {
 "anaconda-cloud": {},
 "kernelspec": {
-"display_name": "Python [py35]",
+"display_name": "Python 3",
 "language": "python",
-"name": "Python [py35]"
+"name": "python3"
 },
 "language_info": {
 "codemirror_mode": {
@@ -728,9 +685,9 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.5.2"
+"version": "3.5.4"
 }
 },
 "nbformat": 4,
-"nbformat_minor": 0
+"nbformat_minor": 1
 }
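
Context for the markdown cell this commit adds: it recommends deleting fitted Keras models and clearing the backend session so memory does not accumulate when many neural net decoders are trained in one session. The sketch below is only an illustration of that cleanup pattern, not part of the committed notebook. It assumes the RNN-formatted training arrays `X_train`/`y_train` from the notebook are already defined; the `build_lstm` helper and the list of unit counts are hypothetical and stand in for whatever models you are sweeping over.

```
import gc

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, LSTM

def build_lstm(units, input_shape):
    # Hypothetical helper: a small LSTM regressor, roughly in the spirit of the
    # notebook's LSTM decoder (not the actual decoders.py implementation).
    model = Sequential()
    model.add(LSTM(units, input_shape=input_shape))
    model.add(Dense(2))  # two outputs, e.g. x/y velocity as in the example data
    model.compile(loss='mse', optimizer='rmsprop')
    return model

# Fit several models in a row (e.g. a hyperparameter sweep) without letting memory grow.
for units in [100, 200, 400]:  # hypothetical sweep values
    model = build_lstm(units, (X_train.shape[1], X_train.shape[2]))
    model.fit(X_train, y_train, epochs=5, verbose=0)
    # ...evaluate the model / store its predictions here...
    del model          # drop the Python reference to the model
    K.clear_session()  # release graph/session state held by the backend
    gc.collect()       # collect anything still lingering
```

Calling `K.clear_session()` between fits resets the backend's default graph, so each new model starts from a fresh state instead of stacking on top of the layers created by earlier fits.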
