Improved temporal pattern code: Correctly handle non-normalized inputs and linear activation functions; implement low-precision activations; more efficient computation of the number of ops.
rbodo committed Jun 3, 2020
1 parent 420c5c8 commit 2cd4ca6
Showing 3 changed files with 60 additions and 59 deletions.
65 changes: 18 additions & 47 deletions snntoolbox/simulation/backends/inisim/temporal_pattern.py
@@ -13,7 +13,6 @@
@author: rbodo
"""

-import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, AveragePooling2D, Layer, \
MaxPooling2D, Conv2D, Concatenate, DepthwiseConv2D, Reshape, ZeroPadding2D
@@ -80,31 +79,37 @@ def spike_call(self, x, call):
self._a = tf.Variable(lambda: tf.zeros_like(x), name='activation',
trainable=False)

-# In case of centered input layer, some x values could be negative.
+# If not using ReLU, some x values could be negative.
# Remove and store signs to apply after binarization.
signs = tf.sign(x)
x = tf.abs(x)

-# Make sure x is normalized before binarization.
-x_max = tf.reduce_max(x)
-x = tf.divide(x, x_max)
+# Make sure input is normalized before binarization. Hidden layers are
+# normalized during parsing.
+if self.is_first_spiking:
+    x_max = tf.reduce_max(x)
+    x = tf.divide(x, x_max)
+else:
+    x_max = 1

# Transform x into binary format here. Effective batch_size increases
# from 1 to num_bits.
-x_b = self.to_binary(x)
+x = self.to_binary(x)

# Apply signs and rescale back to original range.
-x_b = tf.multiply(x_b, signs * x_max)
+x = tf.multiply(x, signs * x_max)

# Perform layer operation, e.g. convolution, on every power of 2.
-x_b = call(self, x_b)
+y = call(self, x)

# Add up the weighted powers of 2 to recover the activation values.
-y = tf.reduce_sum(x_b, 0, keepdims=True)
+y = tf.reduce_sum(y, 0, keepdims=True)

# Apply non-linearity.
-y = tf.nn.softmax(y) if self.activation_str == 'softmax' \
-    else tf.nn.relu(y)
+if self.activation_str == 'softmax':
+    y = tf.nn.softmax(y)
+elif self.activation_str == 'relu':
+    y = tf.nn.relu(y)

self.spikerates.assign(y)
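For illustration, the full round trip above in plain NumPy (a standalone sketch with made-up weights w, not repository code): split off signs, normalize, quantize, decompose into num_bits weighted bit slices, run a linear layer op per slice, and sum the results.

import numpy as np

num_bits = 8
w = np.array([[1.0], [-2.0], [0.5]])   # stand-in linear layer, 3 inputs -> 1
x = np.array([[-0.3, 0.7, 1.9]])       # pre-activations; note the value > 1

signs = np.sign(x)
x_abs = np.abs(x)
x_max = x_abs.max()                    # runtime normalization (first spiking layer)
a = x_abs / x_max

n = 2 ** num_bits - 1
a = np.round(a * n) / n                # the low-precision step added in this commit

# Decompose into num_bits slices, one power of 2 each (cf. to_binary below).
slices = np.zeros((num_bits,) + x.shape[1:])
for i in range(num_bits):
    p = 2.0 ** -(i + 1)
    bit = (a > p) * p
    slices[i] = bit
    a = a - bit

slices = slices * signs * x_max        # re-apply signs, rescale to original range
y = (slices @ w).sum(0, keepdims=True) # layer op per slice, then sum
print(np.abs(y - x @ w).max())         # small residual from num_bits precision

The per-slice application works because the layer operation is linear, so it distributes over the sum of the weighted bit slices.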

@@ -130,7 +135,8 @@ def to_binary(self, x):
``x`` is distributed across the first dimension of ``x_binary``.
"""

-self._a.assign(x)
+n = 2 ** self.num_bits - 1
+self._a.assign(tf.divide(tf.round(tf.multiply(x, n)), n))

for i in tf.range(self.num_bits):
mask = tf.cast(tf.greater(self._a, self.powers[i]), tf.float32)
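For illustration, the effect of the new quantization step in plain NumPy (mirroring the tf ops above): with num_bits = 4, activations are snapped to the nearest multiple of 1/(2**4 - 1) before the bit decomposition.

import numpy as np

num_bits = 4
n = 2 ** num_bits - 1              # 15
x = np.array([0.1, 0.5, 0.93])
print(np.round(x * n) / n)         # [0.13333333 0.53333333 0.93333333]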
@@ -143,41 +149,6 @@
return self._x_binary


-def to_binary_numpy(x, num_bits):
-    """Transform an array of floats into binary representation.
-
-    Parameters
-    ----------
-
-    x: ndarray
-        Input array containing float values. The first dimension has to be of
-        length 1.
-    num_bits: int
-        The fixed point precision to be used when converting to binary.
-
-    Returns
-    -------
-
-    binary_array: ndarray
-        Output boolean array. The first dimension of x is expanded to length
-        ``bits``. The binary representation of each value in ``x`` is
-        distributed across the first dimension of ``binary_array``.
-    """
-
-    x_binary = np.zeros([num_bits] + list(x.shape[1:]))
-
-    powers = [2**-(i+1) for i in range(num_bits)]
-
-    a = np.copy(x)
-
-    for i in range(num_bits):
-        mask = np.greater(a, powers[i])
-        x_binary[i] = mask
-        a -= mask * powers[i]
-
-    return x_binary


class SpikeConcatenate(Concatenate):
"""Spike merge layer"""

@@ -10,6 +10,7 @@
from tensorflow import keras
import numpy as np

+from snntoolbox.parsing.utils import get_inbound_layers_with_params
from snntoolbox.simulation.utils import AbstractSNN, remove_name_counter

remove_classifier = False
@@ -84,6 +85,8 @@ def add_layer(self, layer):

spike_layer = spike_layer_name(**layer_kwargs)
spike_layer.activation_str = activation_str
+spike_layer.is_first_spiking = \
+    len(get_inbound_layers_with_params(layer)) == 0
self._spiking_layers[layer.name] = spike_layer(inbound)
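For context, a hypothetical check of the new flag (assuming get_inbound_layers_with_params walks past weightless layers and returns the closest parameterized predecessors; the model below is made up):

from tensorflow.keras import Input, layers, models

from snntoolbox.parsing.utils import get_inbound_layers_with_params

model = models.Sequential([
    Input((8, 8, 1)),
    layers.Conv2D(4, 3, name='conv'),   # no parameterized predecessor
    layers.Flatten(),
    layers.Dense(10, name='dense'),     # predecessor with params: 'conv'
])

for name in ('conv', 'dense'):
    layer = model.get_layer(name)
    print(name, len(get_inbound_layers_with_params(layer)) == 0)
# Expected: conv True (normalize at runtime), dense False (handled in parsing)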

def build_dense(self, layer):
@@ -89,7 +89,8 @@ def simulate(self, **kwargs):
# Excludes Input, Flatten, Concatenate, etc:
if hasattr(layer, 'spikerates') and layer.spikerates is not None:
spikerates_b_l = layer.spikerates.numpy()
-spiketrains_b_l_t = self.spikerates_to_trains(spikerates_b_l)
+spiketrains_b_l_t = to_binary_numpy(spikerates_b_l, self.num_bits)
self.set_spikerates(spikerates_b_l, i)
self.set_spiketrains(spiketrains_b_l_t, i)
if self.synaptic_operations_b_t is not None:
@@ -127,14 +128,40 @@ def set_neuron_operations(self, i):

def set_synaptic_operations(self, spiketrains_b_l_t, i):
for t in range(self.synaptic_operations_b_t.shape[-1]):
-self.synaptic_operations_b_t[:, t] += 2 * \
-    get_layer_synaptic_operations(
-        spiketrains_b_l_t[Ellipsis, t], self.fanout[i + 1])
+ops = get_layer_synaptic_operations(spiketrains_b_l_t[Ellipsis, t],
+                                    self.fanout[i + 1])
+self.synaptic_operations_b_t[:, t] += 2 * ops

-def spikerates_to_trains(self, spikerates_b_l):
-    x = self.sim.to_binary_numpy(spikerates_b_l, self.num_bits)
-    shape = [self.num_bits] + [1] * (x.ndim - 1)
-    x *= np.resize(np.arange(self.num_bits), shape)
-    perm = (1, 2, 3, 0) if len(x.shape) > 2 else (1, 0)
-    spiketrains_b_l_t = np.expand_dims(np.transpose(x, perm), 0)
-    return spiketrains_b_l_t
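For a rough sense of the tally (hypothetical helper below; it assumes one synaptic operation per nonzero spike entry times the layer's fanout, with the commit's factor of 2 applied by the caller):

import numpy as np

def layer_synaptic_operations(spiketrains_t, fanout):
    # One operation per spike per outgoing connection (simplified stand-in
    # for get_layer_synaptic_operations).
    return np.count_nonzero(spiketrains_t) * fanout

spiketrains_t = np.array([[0.5, 0.0, 0.25, 0.125]])   # one timestep, 3 spikes
print(2 * layer_synaptic_operations(spiketrains_t, fanout=10))   # 60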


+def to_binary_numpy(x, num_bits):
+    """Transform an array of floats into binary representation.
+
+    Parameters
+    ----------
+
+    x: ndarray
+        Input array containing float values. The first dimension has to be of
+        length 1.
+    num_bits: int
+        The fixed point precision to be used when converting to binary.
+
+    Returns
+    -------
+
+    y: ndarray
+        Output array with same shape as ``x`` except that an axis is added to
+        the last dimension with size ``num_bits``. The binary representation of
+        each value in ``x`` is distributed across the last dimension of ``y``.
+    """
+
+    n = 2 ** num_bits - 1
+    a = np.round(x * n) / n
+
+    y = np.zeros(list(x.shape) + [num_bits])
+    for i in range(num_bits):
+        p = 2 ** -(i + 1)
+        b = np.greater(a, p) * p
+        y[Ellipsis, i] = b
+        a -= b
+
+    return y
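Quick usage check of the helper (assuming the function above is in scope): each slot of y carries its power of 2 directly, so summing over the new last axis gives a num_bits-precision reconstruction, and y[..., t] can serve as the spikes emitted at timestep t, which is how simulate uses it.

import numpy as np

x = np.array([[0.7, 0.3, 1.0]])
y = to_binary_numpy(x, num_bits=4)

print(y.shape)        # (1, 3, 4): a time axis of length num_bits is appended
print(y[..., 0])      # [[0.5  0.   0.5 ]]  spikes at the first timestep
print(y.sum(-1))      # [[0.625  0.25  0.9375]]  coarse reconstruction of x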
