Skip to content

Commit 684645e

Browse files
committed
notebooks: add a bunch of bistability experiments
1 parent 8eaa0e2 commit 684645e

File tree

5 files changed

+861
-0
lines changed

5 files changed

+861
-0
lines changed
Lines changed: 143 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,143 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": null,
6+
"metadata": {},
7+
"outputs": [],
8+
"source": [
9+
"%load_ext autoreload\n",
10+
"%autoreload 2\n",
11+
"\n",
12+
"import numpy as np\n",
13+
"import matplotlib.pyplot as plt\n",
14+
"\n",
15+
"from pim.models.network2 import RecurrentNetwork, Layer, InputLayer, Connection, WeightedConnection\n",
16+
"from pim.models.new.stone import tb1_output, tn1_output, tn2_output, cpu4_output, cpu1_output, motor_output, CentralComplex"
17+
]
18+
},
19+
{
20+
"cell_type": "code",
21+
"execution_count": null,
22+
"metadata": {},
23+
"outputs": [],
24+
"source": [
25+
def linear_neuron(gain, gain_noise, noise):
    """Create a linear unit: response(x) = gain * x + output noise.

    The effective gain is drawn once, at construction, from
    N(gain, gain_noise); every call then adds fresh N(0, noise) noise.
    """
    effective_gain = np.random.normal(gain, gain_noise)

    def respond(x):
        return effective_gain * x + np.random.normal(0, noise)

    return respond
30+
"\n",
31+
def bistable_neuron(Idown, Iup, gain, gain_noise, noise):
    """Create a hysteretic (bistable) unit.

    The unit latches ON when the input reaches Iup and OFF when it
    falls to Idown (both thresholds inclusive).  While ON it acts as a
    linear neuron whose gain was drawn once from N(gain, gain_noise);
    while OFF it emits only the additive N(0, noise) output noise.
    """
    sampled_gain = np.random.normal(gain, gain_noise)
    active = 0  # latched state: 0 = OFF, 1 = ON

    def respond(x):
        nonlocal active
        if x >= Iup:
            active = 1
        elif x <= Idown:
            active = 0
        return active * sampled_gain * x + np.random.normal(0, noise)

    return respond
42+
]
43+
},
44+
{
45+
"cell_type": "code",
46+
"execution_count": null,
47+
"metadata": {},
48+
"outputs": [],
49+
"source": [
50+
class BistableLayer(Layer):
    """Population of bistable neurons whose active fraction encodes a
    stored value.

    N units receive switch-on thresholds evenly spaced from
    (mI - dI) / N up to mI; each unit switches off again dI below its
    own threshold.  update() maps the fraction of active units back to
    input scale via mI and a layer gain sampled once from
    N(gain, gain_noise), plus N(0, noise) output noise.
    """

    def __init__(self, name, N, dI, mI, gain, gain_noise, noise):
        super().__init__(name)
        self.N = N
        self.mI = mI
        # Layer gain is sampled once per construction.
        self.gain = np.random.normal(gain, gain_noise)
        self.noise = noise
        thresholds = np.linspace((mI - dI) / N, mI, N)
        self.neurons = [
            bistable_neuron(I_up - dI, I_up, 1.0, 0.0, noise)
            for I_up in thresholds
        ]

    def update(self, x):
        # A unit counts as active when its (noisy) response exceeds 0.5.
        responses = np.array([neuron(x) for neuron in self.neurons])
        active_count = np.sum(responses > 0.5)
        fraction = active_count / self.N
        return fraction * self.mI * self.gain + np.random.normal(0.0, self.noise)
63+
"\n",
64+
class LinearLayer(Layer):
    """Single linear unit wrapped as a network layer; the non-bistable
    alternative to BistableLayer."""

    def __init__(self, name, gain, gain_noise, noise):
        super().__init__(name)
        # One shared linear neuron provides the whole layer's response.
        self.neuron = linear_neuron(gain, gain_noise, noise)

    def update(self, x):
        """Return the neuron's (noisy) linear response to x."""
        return self.neuron(x)
71+
" \n",
72+
def step(network, input_layer, memory, i):
    """Drive the network for one tick with input value i and return
    the memory layer's resulting output."""
    input_layer.set(i)
    network.step()
    return memory.output
76+
]
77+
},
78+
{
79+
"cell_type": "code",
80+
"execution_count": null,
81+
"metadata": {},
82+
"outputs": [],
83+
"source": [
84+
# Build a test input signal over 20 time units: two positive pulses,
# one negative pulse, and a slow ramp.  Plot it alongside its running
# sum — the trajectory an ideal integrator should reproduce.
T = np.linspace(0, 20, 100)
I = np.zeros(T.size)

I[(0.2 < T) & (T < 0.3)] = 1.0
I[(2.5 < T) & (T < 3.5)] = 1.0
I[(7.5 < T) & (T < 8)] = -1.0
ramp = (12 < T) & (T < 15)
I[ramp] = (T[ramp] - 12) * 0.05

plt.figure()
plt.plot(T, I)
plt.plot(T, np.cumsum(I))
plt.show()
95+
]
96+
},
97+
{
98+
"cell_type": "code",
99+
"execution_count": null,
100+
"metadata": {},
101+
"outputs": [],
102+
"source": [
103+
# Wire up a two-layer recurrent network: an input layer feeding a
# "memory" layer that also feeds back onto itself, so it can hold and
# integrate its input.  Swap in LinearLayer to compare against a
# non-bistable memory.
network = RecurrentNetwork()

input_layer = network.add_layer(InputLayer("in"))
memory = network.add_layer(BistableLayer("mem", 40, 0.1, 10, 1.00, 0.00, 0.05))
#memory = network.add_layer(LinearLayer("mem", 1.0, 0.00, 0.05))

# Feed-forward weight 1.00; self-excitation slightly above 1 (1.03) to
# counteract leak in the stored value.
network.add_connection(WeightedConnection("in", "mem", 1.00))
network.add_connection(WeightedConnection("mem", "mem", 1.03))

output = np.array([step(network, input_layer, memory, i) for i in I])

# Compare the memory trace against the exact running sum of the input.
plt.figure()
plt.plot(T, I, label="input")
plt.plot(T, np.cumsum(I), label="true integral")
plt.plot(T, output, label="memory")
plt.legend()
119+
]
120+
}
121+
],
122+
"metadata": {
123+
"kernelspec": {
124+
"display_name": "pim",
125+
"language": "python",
126+
"name": "pim"
127+
},
128+
"language_info": {
129+
"codemirror_mode": {
130+
"name": "ipython",
131+
"version": 3
132+
},
133+
"file_extension": ".py",
134+
"mimetype": "text/x-python",
135+
"name": "python",
136+
"nbconvert_exporter": "python",
137+
"pygments_lexer": "ipython3",
138+
"version": "3.10.4"
139+
}
140+
},
141+
"nbformat": 4,
142+
"nbformat_minor": 4
143+
}

notebooks/bistable/bistable.ipynb

Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,117 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": null,
6+
"id": "4c160e3b-e582-41bb-837f-8243f7810d98",
7+
"metadata": {},
8+
"outputs": [],
9+
"source": [
10+
"import numpy as np\n",
11+
"import plotly.express as px\n",
12+
"import matplotlib.pyplot as plt"
13+
]
14+
},
15+
{
16+
"cell_type": "code",
17+
"execution_count": null,
18+
"id": "45ac964e-91fb-4992-9b34-90b12a47527c",
19+
"metadata": {},
20+
"outputs": [],
21+
"source": [
22+
class BistableNeuron:
    """Hysteretic unit: latches ON strictly above Iup, OFF strictly
    below Idown, and passes the input through at unit gain while ON.
    Every update adds fresh N(0, noise) output noise."""

    def __init__(self, Idown, Iup, noise=0.05):
        self.Iup = Iup
        self.Idown = Idown
        self.state = 0  # latched state: 0 = OFF, 1 = ON
        self.noise = noise

    def update(self, I):
        """Feed one input sample I and return the (noisy) response."""
        if I > self.Iup:
            self.state = 1
        elif I < self.Idown:
            self.state = 0
        return self.state * 1.0 * I + np.random.normal(0, self.noise)
35+
" \n",
36+
def simulate(neuron, T, I):
    """Run the neuron over the input sequence I and return its
    responses as an array.  T is unused by the computation (kept for
    call-site symmetry with the time axis)."""
    return np.array([neuron.update(i) for i in I])
40+
]
41+
},
42+
{
43+
"cell_type": "code",
44+
"execution_count": null,
45+
"id": "d3c24b72-230f-4657-8493-57ac479bccc9",
46+
"metadata": {},
47+
"outputs": [],
48+
"source": [
49+
# Drive one bistable neuron with a triangular input and plot input,
# both thresholds, and the neuron's response on a shared time axis to
# visualise the hysteresis loop.
neuron = BistableNeuron(0.4, 0.6)

T = np.linspace(0, 2, 100)

def I(t):
    # Triangular wave: rises 0 -> 1 over t in [0, 1], falls back over [1, 2].
    wave = t.copy()
    wave[t > 1] = (2 - t)[t > 1]
    return wave

plt.figure()
plt.plot(T, I(T), label="input")
plt.plot(T, np.ones(T.size) * neuron.Idown, label="I_down")
plt.plot(T, np.ones(T.size) * neuron.Iup, label="I_up")

#plt.figure()
y = simulate(neuron, T, I(T))
plt.plot(T, y, label="output")
plt.xlabel("time")
plt.ylabel("activity")

plt.legend()
70+
]
71+
},
72+
{
73+
"cell_type": "code",
74+
"execution_count": null,
75+
"id": "79a3eee1-6bc4-4928-b621-6c3fc20fc2a7",
76+
"metadata": {},
77+
"outputs": [],
78+
"source": [
79+
"N = 40\n",
80+
"dI = 0.05\n",
81+
"neurons = [BistableNeuron(I, I+dI, noise=0.01) for n in range(0, N) for I in (np.random.uniform(0-dI, 1-dI),)]\n",
82+
"\n",
83+
"plt.figure()\n",
84+
"n = np.zeros(T.size)\n",
85+
"for neuron in neurons:\n",
86+
" y = simulate(neuron, T, I(T))\n",
87+
" n += y > I(T) / 2 + 0.05\n",
88+
" plt.plot(T, y)\n",
89+
" \n",
90+
"plt.figure()\n",
91+
"plt.plot(T, I(T))\n",
92+
"plt.plot(T, n / N)\n"
93+
]
94+
}
95+
],
96+
"metadata": {
97+
"kernelspec": {
98+
"display_name": "pim",
99+
"language": "python",
100+
"name": "pim"
101+
},
102+
"language_info": {
103+
"codemirror_mode": {
104+
"name": "ipython",
105+
"version": 3
106+
},
107+
"file_extension": ".py",
108+
"mimetype": "text/x-python",
109+
"name": "python",
110+
"nbconvert_exporter": "python",
111+
"pygments_lexer": "ipython3",
112+
"version": "3.10.4"
113+
}
114+
},
115+
"nbformat": 4,
116+
"nbformat_minor": 5
117+
}

0 commit comments

Comments
 (0)