This supplementary information presents:
%matplotlib inline
%load_ext autoreload
%autoreload 2
A convenience script, model.py,
allows one to run and cache most learning items in this notebook:
%run model.py
tag = HULK n_jobs = 0
from shl_scripts.shl_experiments import SHL
shl = SHL(**opts)
data = shl.get_data(matname=tag)
shl?
Type: SHL String form: <shl_scripts.shl_experiments.SHL object at 0x1134c3910> File: ~/science/HULK/SparseHebbianLearning/shl_scripts/shl_experiments.py Docstring: Base class to define SHL experiments: - initialization - coding and learning - visualization - quantitative analysis
print('# of pixels per patch =', shl.patch_width**2)
# of pixels per patch = 441
print('number of patches, size of patches = ', data.shape)
print('average of patches = ', data.mean(), ' +/- ', data.mean(axis=1).std())
SE = np.sqrt(np.mean(data**2, axis=1))
print('average energy of data = ', SE.mean(), '+/-', SE.std())
number of patches, size of patches = (65520, 441) average of patches = -4.1888600727021664e-05 +/- 0.006270387629074682 average energy of data = 0.26082782604823146 +/- 0.07415089441760706
#!ls -l {shl.cache_dir}/
#!ls -l {shl.cache_dir}/{tag}*
#!ls -ltr {shl.cache_dir}/{tag}*lock*
#!rm {shl.cache_dir}/{tag}*lock*
#!rm {shl.cache_dir}/{tag}*
#!rm {shl.cache_dir}/{tag}*HAP_seed*
#!ls -l {shl.cache_dir}/{tag}*
#!ls -ltr {shl.cache_dir}/{tag}*lock*
fname = 'figure_map'
# we cross-validate with 10 different learnings
one_cv = 8 # and pick one at random to display intermediate results
The actual learning is done in a second object (here dico)
from which we can access another set of properties and functions (see the shl_learn.py script):
homeo_methods = ['None', 'OLS', 'HEH']
list_figures = ['show_dico', 'time_plot_error', 'time_plot_logL', 'time_plot_MC', 'show_Pcum']
list_figures = []
# Cross-validated learning: train one dictionary per (seed, homeostasis-method)
# pair. Results are cached on disk through the `matname` argument, so re-running
# this cell only reloads the cached learning.
dico = {}
for i_cv in range(N_cv):
    dico[i_cv] = {}
    for homeo_method in homeo_methods:
        # each cross-validation run uses a different seed offset
        shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
        dico[i_cv][homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
# Display the learned dictionaries for one representative cross-validation run
# (one_cv) and print summary statistics (shape, mean, energy) of the filters.
list_figures = ['show_dico']
for i_cv in [one_cv]:
    for homeo_method in homeo_methods:
        print(hl + hs + homeo_method[:3] + hs + hl)
        shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
        # matname matches the cache written above, so this only reloads
        shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
        print('size of dictionary = (number of filters, size of imagelets) = ', dico[i_cv][homeo_method].dictionary.shape)
        print('average of filters = ', dico[i_cv][homeo_method].dictionary.mean(axis=1).mean(),
              '+/-', dico[i_cv][homeo_method].dictionary.mean(axis=1).std())
        # filters are L2-normalized: their energy should be exactly 1
        SE = np.sqrt(np.sum(dico[i_cv][homeo_method].dictionary**2, axis=1))
        print('average energy of filters = ', SE.mean(), '+/-', SE.std())
        plt.show()
---------- Non ---------- size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = -7.038410893224767e-06 +/- 0.0008419021277793694 average energy of filters = 1.0 +/- 3.866729645080236e-17
---------- OLS ---------- size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = 1.8871620517154972e-05 +/- 0.0007995474521857563 average energy of filters = 1.0 +/- 4.0734048673293375e-17
---------- HEH ---------- size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = -2.9411542274321333e-05 +/- 0.0008106530645520307 average energy of filters = 1.0 +/- 4.312578046109635e-17
pname = '/tmp/panel_A' #pname = fname + '_A'
from shl_scripts import show_dico
# BUG FIX: the index was misspelled `one_cvi_cv` (a NameError at runtime);
# use the chosen cross-validation index `one_cv` instead.
if DEBUG: show_dico(shl, dico[one_cv][homeo_method], data=data, dim_graph=(2,5))
dim_graph = (2, 9)
colors = ['black', 'orange', 'blue']
homeo_methods
['None', 'OLS', 'HEH']
%run model.py
tag = HULK n_jobs = 0
<Figure size 432x288 with 0 Axes>
# Panel A: stack the dictionaries of the three methods vertically, labelling
# each row with its method name in the matching color.
subplotpars = dict(left=0.042, right=1., bottom=0., top=1., wspace=0.05, hspace=0.05,)
fig, axs = plt.subplots(3, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for ax, color, homeo_method in zip(axs.ravel(), colors, homeo_methods):
    ax.axis(c=color, lw=2, axisbg='w')
    ax.set_facecolor('w')
    fig, ax = show_dico(shl, dico[one_cv][homeo_method], data=data, dim_graph=dim_graph, fig=fig, ax=ax)
    # ax.set_ylabel(homeo_method)
    # method label written in data coordinates to the left of the imagelets
    ax.text(-10, 29, homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
fig.savefig('graphical_abstract.png', dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
if DEBUG: help(fig.subplots_adjust)
if DEBUG: help(plt.subplots)
if DEBUG: help(matplotlib.gridspec.GridSpec)
# Panel B: time course of the cost F for each method, overlaying all N_cv
# cross-validation runs; the first run is labelled and drawn more opaque.
pname = '/tmp/panel_B' #fname + '_B'
Flim1, Flim2 = .475, .626  # shared y-axis limits for F across panels
from shl_scripts import time_plot
variable = 'F'
alpha_0, alpha = .3, .15  # opacity of the labelled run vs the other runs
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
fig, ax = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for i_cv in range(N_cv):
    for color, homeo_method in zip(colors, homeo_methods):
        ax.axis(c='b', lw=2, axisbg='w')
        ax.set_facecolor('w')
        if i_cv==0:
            # label only the first run to avoid duplicated legend entries
            fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, label=homeo_method, alpha=alpha_0, fig=fig, ax=ax)
        else:
            fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, alpha=alpha, fig=fig, ax=ax)
# ax.set_ylabel(homeo_method)
#ax.text(-8, 7*dim_graph[0], homeo_method, fontsize=12, color='k', rotation=90)#, backgroundcolor='white'
ax.legend(loc='best')
ax.set_ylim(Flim1, Flim2)
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
import tikzmagic
%load_ext tikzmagic
#DEBUG = True
if DEBUG: help(tikzmagic)
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_A}};
\draw [anchor=north west] (.5\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_B}};
\begin{scope}[font=\bf\sffamily\large]
\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
fname = 'figure_HEH'
First collecting data:
# Collect the cached dictionaries, one per method, for the representative
# cross-validation run (seed offset one_cv).
list_figures = ['show_Pcum']
dico = {}
for homeo_method in homeo_methods:
    print(hl + hs + homeo_method + hs + hl)
    shl = SHL(homeo_method=homeo_method, **opts)
    #dico[homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_' + str(one_cv))
    dico[homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+one_cv))
    plt.show()
---------- None ----------
---------- OLS ----------
---------- HEH ----------
---------- HAP ----------
---------- EMP ----------
dico[homeo_method].P_cum.shape
(676, 128)
# Panel A: overlay the cumulative non-linear homeostatic functions (P_cum)
# learned by each method, one color per method.
pname = '/tmp/panel_A' #pname = fname + '_A'
from shl_scripts import plot_P_cum
#variable = 'F'
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
fig, ax = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for color, homeo_method in zip(colors, homeo_methods):
    ax.axis(c='b', lw=2, axisbg='w')
    ax.set_facecolor('w')
    fig, ax = plot_P_cum(dico[homeo_method].P_cum, ymin=0.93, ymax=1.001,
                         title=None, suptitle=None, ylabel='non-linear functions',
                         verbose=False, n_yticks=21, alpha=.02, c=color, fig=fig, ax=ax)
    # dummy point drawn only to get one opaque legend entry per method
    ax.plot([0], [0], lw=1, color=color, label=homeo_method, alpha=.6)
    # ax.set_ylabel(homeo_method)
    #ax.text(-8, 7*dim_graph[0], homeo_method, fontsize=12, color='k', rotation=90)#, backgroundcolor='white'
ax.legend(loc='lower right')
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
if DEBUG: help(fig.legend)
# Panel B: scan the learning-rate parameters (eta, eta_homeo) for each
# method. The runs are cached so the scan below only re-reads results.
pname = '/tmp/panel_B' #fname + '_B'
n_jobs = 1
from shl_scripts.shl_experiments import SHL_set
homeo_methods = ['None', 'OLS', 'HEH']
variables = ['eta', 'eta_homeo']
#latex_variables = [r'$\eta$', r'$\eta_\textnormal{homeo}$']
list_figures = []
for homeo_method in homeo_methods:
    opts_ = opts.copy()
    opts_.update(homeo_method=homeo_method)
    experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
    experiments.run(variables=variables, n_jobs=n_jobs, verbose=0)
import matplotlib.pyplot as plt
# Plot the final value of F as a function of each scanned variable:
# one subplot per variable, one color per homeostasis method.
subplotpars = dict(left=0.2, right=.95, bottom=0.05, top=.95, wspace=0.5, hspace=0.6,)
x, y = .05, .8 #-.3
fig, axs = plt.subplots(len(variables), 1, figsize=(fig_width/2, fig_width/(1.3+phi)), gridspec_kw=subplotpars, sharey=True)
for i_ax, variable in enumerate(variables):
    for color, homeo_method in zip(colors, homeo_methods):
        opts_ = opts.copy()
        opts_.update(homeo_method=homeo_method)
        experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
        fig, axs[i_ax] = experiments.scan(variable=variable, list_figures=[], display='final', fig=fig, ax=axs[i_ax], color=color, display_variable='F', verbose=0) #, label=homeo_metho
    #axs[i_ax].set_xlabel(latex_variables[i_ax]) #variable
    #axs[i_ax].text(x, y, variable, transform=axs[i_ax].transAxes)
    #axs[i_ax].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    axs[i_ax].set_ylim(Flim1, Flim2)
axs[0].xaxis.set_label_coords(0.5,-.325)
#fig.legend(loc='lower right')
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_A.pdf}};
\draw [anchor=north west] (.5\linewidth, .382\linewidth) node {\includegraphics[width=.465\linewidth]{/tmp/panel_B.pdf}};
\begin{scope}[font=\bf\sffamily\large]
\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
fname = 'figure_HAP'
# Compare four homeostasis methods; same cross-validated learning protocol
# as in the first figure, with the results cached per (method, seed).
colors = ['orange', 'blue', 'red', 'green']
homeo_methods = ['OLS', 'HEH', 'EMP', 'HAP']
list_figures = []
dico = {}
for i_cv in range(N_cv):
    dico[i_cv] = {}
    for homeo_method in homeo_methods:
        shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
        dico[i_cv][homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
# Display the representative run and print filter statistics.
list_figures = ['show_dico'] if DEBUG else []
for i_cv in [one_cv]:
    for homeo_method in homeo_methods:
        print(hl + hs + homeo_method + hs + hl)
        shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
        shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
        plt.show()
        print('size of dictionary = (number of filters, size of imagelets) = ', dico[i_cv][homeo_method].dictionary.shape)
        print('average of filters = ', dico[i_cv][homeo_method].dictionary.mean(axis=1).mean(),
              '+/-', dico[i_cv][homeo_method].dictionary.mean(axis=1).std())
        # filters are L2-normalized: their energy should be exactly 1
        SE = np.sqrt(np.sum(dico[i_cv][homeo_method].dictionary**2, axis=1))
        print('average energy of filters = ', SE.mean(), '+/-', SE.std())
---------- OLS ---------- size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = 1.8871620517154972e-05 +/- 0.0007995474521857563 average energy of filters = 1.0 +/- 4.0734048673293375e-17 ---------- HEH ---------- size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = -2.9411542274321333e-05 +/- 0.0008106530645520307 average energy of filters = 1.0 +/- 4.312578046109635e-17 ---------- EMP ---------- size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = -4.87013859105051e-05 +/- 0.0008528193053604389 average energy of filters = 1.0 +/- 4.437606199346686e-17 ---------- HAP ---------- size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = -2.1390427270893857e-05 +/- 0.0008757678879900148 average energy of filters = 1.0 +/- 4.333666573072855e-17
# Panel A: dictionaries of HEH, EMP and HAP (the first entry, OLS, is
# skipped via the [1:] slices since it was shown in the previous figure).
pname = '/tmp/panel_A' #pname = fname + '_A'
subplotpars = dict( left=0.042, right=1., bottom=0., top=1., wspace=0.05, hspace=0.05,)
fig, axs = plt.subplots(3, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for ax, color, homeo_method in zip(axs.ravel(), colors[1:], homeo_methods[1:]):
    ax.axis(c=color, lw=2, axisbg='w')
    ax.set_facecolor('w')
    from shl_scripts import show_dico
    fig, ax = show_dico(shl, dico[one_cv][homeo_method], data=data, dim_graph=dim_graph, fig=fig, ax=ax)
    # ax.set_ylabel(homeo_method)
    ax.text(-10, 29, homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
# Panel B: time course of F for the four methods, overlaying all
# cross-validation runs as in the first figure.
pname = '/tmp/panel_B' #fname + '_B'
from shl_scripts import time_plot
variable = 'F'
alpha = .3
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
fig, ax = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for i_cv in range(N_cv):
    for color, homeo_method in zip(colors, homeo_methods):
        ax.axis(c='b', lw=2, axisbg='w')
        ax.set_facecolor('w')
        if i_cv==0:
            # label only the first run to avoid duplicated legend entries
            fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, label=homeo_method, alpha=alpha_0, fig=fig, ax=ax)
        else:
            fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, alpha=alpha, fig=fig, ax=ax)
ax.legend(loc='best')
ax.set_ylim(Flim1, Flim2)
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
if DEBUG: Image(pname +'.png')
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_A}};
\draw [anchor=north west] (.5\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_B}};
\begin{scope}[font=\bf\sffamily\large]
\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
As a control, we compare the methods for different parameters:
# Control: scan the learning-rate parameters for the four methods and plot
# the final value of F, one subplot per variable.
list_figures = []
for homeo_method in homeo_methods:
    opts_ = opts.copy()
    opts_.update(homeo_method=homeo_method)
    experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
    experiments.run(variables=variables, n_jobs=n_jobs, verbose=0)
import matplotlib.pyplot as plt
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95, wspace=0.5, hspace=0.35,)
x, y = .05, .8 #-.3
UP = 3  # upscaling factor for this larger control figure
fig, axs = plt.subplots(len(variables), 1, figsize=(UP*fig_width/2, UP*fig_width/(1+phi)), gridspec_kw=subplotpars, sharey=True)
for i_ax, variable in enumerate(variables):
    for color, homeo_method in zip(colors, homeo_methods):
        opts_ = opts.copy()
        opts_.update(homeo_method=homeo_method)
        experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
        fig, axs[i_ax] = experiments.scan(variable=variable, list_figures=[], display='final', fig=fig, ax=axs[i_ax], color=color, display_variable='F', verbose=0, label=homeo_method) #
    axs[i_ax].set_xlabel('') #variable
    axs[i_ax].text(x, y, variable, transform=axs[i_ax].transAxes)
    #axs[i_ax].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    # BUG FIX: was `ax.set_ylim(...)`, which targeted a stale axes object
    # left over from a previous figure; set the limits on the current subplot.
    axs[i_ax].set_ylim(Flim1, Flim2)
fig.legend(loc='right')
<matplotlib.legend.Legend at 0x1d07fc7d0>
fname = 'figure_CNN'
!rm -fr /tmp/database/Face_DataBase
!mkdir -p /tmp/database && rsync -a "/Users/laurentperrinet/science/VB_These/Rapport d'avancement/database/Face_DataBase" /tmp/database/
#!mkdir -p /tmp/database/ && rsync -a "/Users/laurentperrinet/science/VB_These/Rapport d'avancement/database/Face_DataBase/Raw_DataBase/*" /tmp/database/Face_DataBase
# Load the face database and pre-process it for the convolutional (CHAMP)
# experiment: local contrast normalization, band-pass filtering, and a
# Gaussian mask applied to each dictionary element.
from CHAMP.DataLoader import LoadData
from CHAMP.DataTools import LocalContrastNormalization, FilterInputData, GenerateMask
from CHAMP.Monitor import DisplayDico, DisplayConvergenceCHAMP, DisplayWhere
import os
datapath = os.path.join("/tmp", "database")
path = os.path.join(datapath, "Face_DataBase/Raw_DataBase")
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
# MP Parameters
nb_dico = 20
width = 9
dico_size = (width, width)
l0 = 20  # target number of active (nonzero) coefficients per patch
seed = 42
# Learning Parameters
eta = .05
nb_epoch = 500
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
N_TrSet, _, _, _ = LocalContrastNormalization(TrSet)
Filtered_L_TrSet = FilterInputData(
    N_TrSet, sigma=0.25, style='Custom', start_R=15)
mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
from CHAMP.CHAMP_Layer import CHAMP_Layer
from CHAMP.DataTools import SaveNetwork, LoadNetwork
# Train (or load from cache) one CHAMP layer without homeostasis
# (eta_homeo=0.) and one with the HAP heuristic.
homeo_methods = ['None', 'HAP']
for homeo_method, eta_homeo in zip(homeo_methods, [0., 0.0025]):
    ffname = 'cache_dir_CNN/CHAMP_low_' + homeo_method + '.pkl'
    try:
        L1_mask = LoadNetwork(loading_path=ffname)
    except Exception:
        # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
        # SystemExit; catch ordinary exceptions only (e.g. a missing cache
        # file) and retrain the layer from scratch, then cache it.
        L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                              dico_size=dico_size, mask=mask, verbose=1)
        dico_mask = L1_mask.TrainLayer(
            Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
        SaveNetwork(Network=L1_mask, saving_path=ffname)
# Panel A: display each learned CHAMP dictionary, one figure per method,
# saved with the method name suffixed to the panel filename.
pname = '/tmp/panel_A' #pname = fname + '_A'
subplotpars = dict(left=0.042, right=1., bottom=0., top=1., wspace=0.05, hspace=0.05,)
for color, homeo_method in zip(['black', 'green'], homeo_methods):
    #fig, axs = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
    ffname = 'cache_dir_CNN/CHAMP_low_' + homeo_method + '.pkl'
    L1_mask = LoadNetwork(loading_path=ffname)
    fig, ax = DisplayDico(L1_mask.dictionary)
    # ax.set_ylabel(homeo_method)
    #for ax in list(axs):
    #    ax.axis(c=color, lw=2, axisbg='w')
    #    ax.set_facecolor('w')
    # method label written to the left of the first imagelet
    ax[0].text(-5, 6, homeo_method, fontsize=8, color=color, rotation=90)#, backgroundcolor='white'
    plt.tight_layout( pad=0., w_pad=0., h_pad=.0)
    for ext in FORMATS: fig.savefig(pname + '_' + homeo_method + ext, dpi=dpi_export, bbox_inches='tight')
<Figure size 576x28.8 with 0 Axes>
<Figure size 576x28.8 with 0 Axes>
# Panel B: histogram of feature activation counts for each method, showing
# the equalizing effect of homeostasis on the CHAMP layer.
pname = '/tmp/panel_B' #fname + '_B'
from shl_scripts import time_plot
variable = 'F'
alpha = .3
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
for color, homeo_method in zip(['black', 'green'], homeo_methods):
    #fig, axs = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
    ffname = 'cache_dir_CNN/CHAMP_low_' + homeo_method + '.pkl'
    L1_mask = LoadNetwork(loading_path=ffname)
    fig, ax = DisplayConvergenceCHAMP(L1_mask, to_display=['histo'], color=color)
    ax.axis(c=color, lw=2, axisbg='w')
    ax.set_facecolor('w')
    ax.set_ylabel('counts')
    ax.set_xlabel('feature #')
    # common y-limit so the two methods' histograms are comparable
    ax.set_ylim(0, 560)
    #ax.text(-8, 7*dim_graph[0], homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
    #ax[0].text(-8, 3, homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
    fig.suptitle(f'method={homeo_method}', y=1.15, fontsize=12)
    for ext in FORMATS: fig.savefig(pname + '_' + homeo_method + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
%ls -ltr /tmp/panel_*
-rw-r--r-- 1 laurentperrinet wheel 73205 Sep 17 09:18 /tmp/panel_A.pdf -rw-r--r-- 1 laurentperrinet wheel 86792 Sep 17 09:18 /tmp/panel_A.png -rw-r--r-- 1 laurentperrinet wheel 49318 Sep 17 09:18 /tmp/panel_B.pdf -rw-r--r-- 1 laurentperrinet wheel 519882 Sep 17 09:18 /tmp/panel_B.png -rw-r--r-- 1 laurentperrinet wheel 27989 Sep 17 09:18 /tmp/panel_A_None.pdf -rw-r--r-- 1 laurentperrinet wheel 21876 Sep 17 09:18 /tmp/panel_A_None.png -rw-r--r-- 1 laurentperrinet wheel 29692 Sep 17 09:18 /tmp/panel_A_HAP.pdf -rw-r--r-- 1 laurentperrinet wheel 19410 Sep 17 09:18 /tmp/panel_A_HAP.png -rw-r--r-- 1 laurentperrinet wheel 10208 Sep 17 09:18 /tmp/panel_B_None.pdf -rw-r--r-- 1 laurentperrinet wheel 64556 Sep 17 09:18 /tmp/panel_B_None.png -rw-r--r-- 1 laurentperrinet wheel 10679 Sep 17 09:18 /tmp/panel_B_HAP.pdf -rw-r--r-- 1 laurentperrinet wheel 64318 Sep 17 09:18 /tmp/panel_B_HAP.png
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .375\linewidth) node {\includegraphics[width=.95\linewidth]{/tmp/panel_A_None}};
\draw [anchor=north west] (.0\linewidth, .300\linewidth) node {\includegraphics[width=.95\linewidth]{/tmp/panel_A_HAP}};
\draw [anchor=north west] (.0\linewidth, .191\linewidth) node {\includegraphics[width=.45\linewidth]{/tmp/panel_B_None}};
\draw [anchor=north west] (.5\linewidth, .191\linewidth) node {\includegraphics[width=.45\linewidth]{/tmp/panel_B_HAP}};
\begin{scope}[font=\bf\sffamily\large]
%\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.0\linewidth, .191\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .191\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
The learning itself is done via a gradient descent but is highly dependent on the coding / decoding algorithm. This belongs to another function (in the shl_encode.py script).
# Vanilla learning with the default options; print filter statistics.
shl = SHL(**opts)
list_figures = ['show_dico', 'show_Pcum', 'time_plot_F']
dico = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_vanilla')
print('size of dictionary = (number of filters, size of imagelets) = ', dico.dictionary.shape)
print('average of filters = ', dico.dictionary.mean(axis=1).mean(),
      '+/-', dico.dictionary.mean(axis=1).std())
# filters are L2-normalized: their energy should be exactly 1
SE = np.sqrt(np.sum(dico.dictionary**2, axis=1))
print('average energy of filters = ', SE.mean(), '+/-', SE.std())
size of dictionary = (number of filters, size of imagelets) = (676, 441) average of filters = 2.535468337218861e-05 +/- 0.0008451207321658774 average energy of filters = 1.0 +/- 3.959917221265013e-17
help(shl)
Help on SHL in module shl_scripts.shl_experiments object: class SHL(builtins.object) | SHL(height=256, width=256, patch_width=21, N_patches=65536, datapath='../database/', name_database='kodakdb', do_mask=True, do_bandpass=True, over_patches=16, patch_ds=1, n_dictionary=676, learning_algorithm='mp', fit_tol=None, l0_sparseness=21, alpha_MP=0.95, one_over_F=True, n_iter=4097, eta=0.02, beta1=0.99, beta2=0.99, epsilon=10, do_precision=False, eta_precision=0.0, homeo_method='HAP', eta_homeo=0.01, alpha_homeo=0.05, C=3.0, nb_quant=128, P_cum=None, do_sym=False, seed=42, patch_norm=False, batch_size=4096, record_each=32, record_num_batches=1024, n_image=None, DEBUG_DOWNSCALE=1, verbose=0, cache_dir='cache_dir') | | Base class to define SHL experiments: | - initialization | - coding and learning | - visualization | - quantitative analysis | | Methods defined here: | | __init__(self, height=256, width=256, patch_width=21, N_patches=65536, datapath='../database/', name_database='kodakdb', do_mask=True, do_bandpass=True, over_patches=16, patch_ds=1, n_dictionary=676, learning_algorithm='mp', fit_tol=None, l0_sparseness=21, alpha_MP=0.95, one_over_F=True, n_iter=4097, eta=0.02, beta1=0.99, beta2=0.99, epsilon=10, do_precision=False, eta_precision=0.0, homeo_method='HAP', eta_homeo=0.01, alpha_homeo=0.05, C=3.0, nb_quant=128, P_cum=None, do_sym=False, seed=42, patch_norm=False, batch_size=4096, record_each=32, record_num_batches=1024, n_image=None, DEBUG_DOWNSCALE=1, verbose=0, cache_dir='cache_dir') | Initialize self. See help(type(self)) for accurate signature. 
| | code(self, data, dico, coding_algorithm='mp', matname=None, P_cum=None, fit_tol=None, l0_sparseness=None, gain=None) | | decode(self, sparse_code, dico) | | get_data(self, matname=None, patch_width=None) | | learn_dico(self, dictionary=None, precision=None, P_cum=None, data=None, matname=None, record_each=None, folder_exp=None, list_figures=[], fig_kwargs={'fig': None, 'ax': None}) | | plot_error(self, dico, **fig_kwargs) | | plot_variance(self, sparse_code, **fig_kwargs) | | plot_variance_histogram(self, sparse_code, **fig_kwargs) | | show_Pcum(self, dico, title=None, verbose=False, n_yticks=21, alpha=0.05, c='g', **fig_kwargs) | | show_dico(self, dico, data=None, title=None, **fig_kwargs) | | show_dico_in_order(self, dico, data=None, title=None, **fig_kwargs) | | time_plot(self, dico, variable='kurt', N_nosample=1, **fig_kwargs) | | ---------------------------------------------------------------------- | Data descriptors defined here: | | __dict__ | dictionary for instance variables (if defined) | | __weakref__ | list of weak references to the object (if defined)
help(dico)
Help on SparseHebbianLearning in module shl_scripts.shl_learn object: class SparseHebbianLearning(builtins.object) | SparseHebbianLearning(fit_algorithm='mp', dictionary=None, precision=None, eta=0.003, beta1=0.9, beta2=0.999, epsilon=8, homeo_method='HEH', eta_homeo=0.05, alpha_homeo=0.0, C=5.0, nb_quant=256, P_cum=None, n_dictionary=None, n_iter=10000, batch_size=32, l0_sparseness=None, fit_tol=None, alpha_MP=1.0, do_precision=False, eta_precision=0.01, do_sym=False, record_each=200, record_num_batches=4096, verbose=False, one_over_F=True) | | Sparse Hebbian learning | | Finds a dictionary (a set of atoms) that can best be used to represent data | using a sparse code. | | Parameters | ---------- | | n_dictionary : int, | Number of dictionary elements to extract | | eta : float or dict | Gives the learning parameter for the homeostatic gain. | | n_iter : int, | total number of iterations to perform | | eta_homeo : float | Gives the learning parameter for the homeostatic gain. | | alpha_homeo : float | Gives the smoothing exponent for the homeostatic gain | If equal to 1 the homeostatic learning rule learns a linear relation to | variance. | | dictionary : array of shape (n_dictionary, n_pixels), | initial value of the dictionary for warm restart scenarios | Use ``None`` for a new learning. | | fit_algorithm : {'mp', 'lars', 'cd'} | see sparse_encode | | batch_size : int, | The number of samples to take in each batch. | | l0_sparseness : int, ``0.1 * n_pixels`` by default | Number of nonzero coefficients to target in each column of the | solution. This is only used by `algorithm='lars'`, `algorithm='mp'` and | `algorithm='omp'`. | | fit_tol : float, 1. by default | If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `fit_tol` is the | penalty applied to the L1 norm. | If `algorithm='threshold'`, `fit_tol` is the absolute value of the | threshold below which coefficients will be squashed to zero. 
| If `algorithm='mp'` or `algorithm='omp'`, `fit_tol` is the tolerance | parameter: the value of the reconstruction error targeted. In this case, | it overrides `l0_sparseness`. | | verbose : | degree of verbosity of the printed output | | Attributes | ---------- | dictionary : array, [n_dictionary, n_pixels] | dictionary extracted from the data | | | Notes | ----- | **References:** | | Olshausen BA, Field DJ (1996). | Emergence of simple-cell receptive field properties by learning a sparse code for natural images. | Nature, 381: 607-609. (http://redwood.berkeley.edu/bruno/papers/nature-paper.pdf) | | Olshausen BA, Field DJ (1997) | Sparse Coding with an Overcomplete Basis Set: A Strategy Employed by V1? | Vision Research, 37: 3311-3325. (http://redwood.berkeley.edu/bruno/papers/VR.pdf) | | See also | -------- | http://scikit-learn.org/stable/auto_examples/decomposition/plot_image_denoising.html | | Methods defined here: | | __init__(self, fit_algorithm='mp', dictionary=None, precision=None, eta=0.003, beta1=0.9, beta2=0.999, epsilon=8, homeo_method='HEH', eta_homeo=0.05, alpha_homeo=0.0, C=5.0, nb_quant=256, P_cum=None, n_dictionary=None, n_iter=10000, batch_size=32, l0_sparseness=None, fit_tol=None, alpha_MP=1.0, do_precision=False, eta_precision=0.01, do_sym=False, record_each=200, record_num_batches=4096, verbose=False, one_over_F=True) | Initialize self. See help(type(self)) for accurate signature. | | fit(self, X, y=None) | Fit the model from data in X. | | Parameters | ---------- | X: array-like, shape (n_samples, n_pixels) | Training vector, where n_samples in the number of samples | and n_pixels is the number of features. | | Returns | ------- | self : object | Returns the instance itself. | | transform(self, X, algorithm=None, l0_sparseness=None, fit_tol=None, alpha_MP=None) | Fit the model from data in X. 
| | Parameters | ---------- | X: array-like, shape (n_samples, n_pixels) | Training vector, where n_samples in the number of samples | and n_pixels is the number of features. | | Returns | ------- | self : object | Returns sparse code. | | ---------------------------------------------------------------------- | Data descriptors defined here: | | __dict__ | dictionary for instance variables (if defined) | | __weakref__ | list of weak references to the object (if defined)
Loading patches, with or without mask:
N_patches = 12
from shl_scripts.shl_tools import show_data
# Show a few extracted patches with and without the circular mask.
# FIX: the enumerate() index was unused; iterate the (do_mask, label)
# pairs directly.
for do_mask, label in zip([False, True], ['Without mask', 'With mask']):
    opts_ = opts.copy()
    opts_.update(DEBUG_DOWNSCALE=1, N_patches=N_patches, n_image=1, do_mask=do_mask, seed=seed, verbose=0)
    data_ = SHL(**opts_).get_data(matname=tag)
    data_ = data_[:N_patches, :]  # keep only the first N_patches for display
    fig, axs = show_data(data_)
    axs[0].set_ylabel(label)
    plt.show()
<Figure size 1080x216 with 0 Axes>
<Figure size 1080x216 with 0 Axes>
# Compare different coding algorithms, with and without homeostasis, on the
# time course of F; all curves share the same axes.
fig, ax = None, None
for homeo_method in ['None', 'HAP']:
    for algorithm in ['lasso_lars', 'lars', 'elastic', 'omp', 'mp']: # 'threshold', 'lasso_cd',
        opts_ = opts.copy()
        opts_.update(homeo_method=homeo_method, learning_algorithm=algorithm, verbose=0)
        shl = SHL(**opts_)
        dico= shl.learn_dico(data=data, list_figures=[],
                             matname=tag + ' - algorithm={}'.format(algorithm) + ' - homeo_method={}'.format(homeo_method))
        fig, ax = shl.time_plot(dico, variable='F', fig=fig, ax=ax, label=algorithm +'_' + homeo_method)
ax.legend()
White Noise Initialization + Learning
# Compare dictionary initialization with white noise vs 1/f noise on the
# time course of F.
shl = SHL(one_over_F=False, **opts)
dico_w = shl.learn_dico(data=data, matname=tag + '_WHITE', list_figures=[])
shl = SHL(one_over_F=True, **opts)
dico_1oF = shl.learn_dico(data=data, matname=tag + '_OVF', list_figures=[])
fig_error, ax_error = None, None
fig_error, ax_error = shl.time_plot(dico_w, variable='F', fig=fig_error, ax=ax_error, color='blue', label='white noise')
fig_error, ax_error = shl.time_plot(dico_1oF, variable='F', fig=fig_error, ax=ax_error, color='red', label='one over f')
#ax_error.set_ylim((0, .65))
ax_error.legend(loc='best')
<matplotlib.legend.Legend at 0x208911bd0>
We use by default the strategy of ADAM, see https://arxiv.org/pdf/1412.6980.pdf
# Compare a fixed learning rate (beta1=0. disables the momentum term of
# ADAM) against the default ADAM optimizer.
shl = SHL(beta1=0., **opts)
dico_fixed = shl.learn_dico(data=data, matname=tag + '_fixed', list_figures=[])
shl = SHL(**opts)
dico_default = shl.learn_dico(data=data, matname=tag + '_default', list_figures=[])
fig_error, ax_error = None, None
fig_error, ax_error = shl.time_plot(dico_fixed, variable='F', fig=fig_error, ax=ax_error, color='blue', label='fixed')
fig_error, ax_error = shl.time_plot(dico_default, variable='F', fig=fig_error, ax=ax_error, color='red', label='ADAM')
#ax_error.set_ylim((0, .65))
ax_error.legend(loc='best')
<matplotlib.legend.Legend at 0x1cfb238d0>
As suggested by AnonReviewer3, we have tested how the convergence was modified by changing the number of neurons. By comparing different numbers of neurons, we could re-draw the same figures for the convergence of the algorithm as in our original figures. In addition, we have also checked that this result holds over a range of sparsity levels. In particular, we found that, in general, increasing the l0_sparseness
parameter made the convergence take progressively longer. Importantly, we could see that in both cases, this did not depend on the kind of homeostasis heuristic chosen, demonstrating the generality of our results.
This is shown in the supplementary material that we have added to our revision ("Testing different numbers of neurons and sparsity"). This useful extension underlines the originality of our work as highlighted in point 4, and the generality of these results with respect to the parameters of the network.
# Run (and cache) the scans over sparsity level and dictionary size for
# every homeostasis heuristic.
#from shl_scripts.shl_experiments import SHL_set
homeo_methods = ['None', 'EMP', 'HAP', 'HEH', 'OLS']
variables = ['l0_sparseness', 'n_dictionary']
list_figures = []
for homeo_method in homeo_methods:
    scan_opts = dict(opts)
    scan_opts.update(homeo_method=homeo_method, datapath=datapath)
    experiments = SHL_set(scan_opts, tag=tag + '_' + homeo_method)
    experiments.run(variables=variables, n_jobs=1, verbose=0)
# One subplot per scanned variable; each homeostasis method is overlaid in
# its own color (final value of the error 'F' versus the scanned variable).
# NOTE(review): `colors`, `fig_width`, `phi` and `subplotpars` come from an
# earlier cell — confirm they are in scope.
fig, axs = plt.subplots(len(variables), 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars, sharey=True)
for i_ax, variable in enumerate(variables):
    for color, homeo_method in zip(colors, homeo_methods):
        opts_ = opts.copy()
        opts_.update(homeo_method=homeo_method, datapath=datapath)
        # re-open the cached experiment set (already run in a previous cell)
        experiments = SHL_set(opts_, tag=tag + '_' + homeo_method)
        fig, axs[i_ax] = experiments.scan(variable=variable, list_figures=[], display='final', fig=fig, ax=axs[i_ax], color=color, display_variable='F', verbose=0) #, label=homeo_metho
    # label the panel with the variable name inside the axes, not on the x axis
    axs[i_ax].set_xlabel('') #variable
    axs[i_ax].text(.1, .8, variable, transform=axs[i_ax].transAxes)
#axs[i_ax].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
from CHAMP.DataLoader import LoadData
from CHAMP.DataTools import LocalContrastNormalization, FilterInputData, GenerateMask
from CHAMP.Monitor import DisplayDico, DisplayConvergenceCHAMP, DisplayWhere
import os
# Load the face database used by the convolutional (CHAMP) experiments.
# NOTE(review): assumes the raw database was unpacked under /tmp/database —
# confirm the path before running.
datapath = os.path.join("/tmp", "database")
path = os.path.join(datapath, "Face_DataBase/Raw_DataBase")
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
# preview the first 10 training images
to_display = TrSet[0][0, 0:10, :, :, :]
print('Size=', TrSet[0].shape)
DisplayDico(to_display)
Size= torch.Size([1, 400, 1, 65, 65])
(<Figure size 576x57.6 with 10 Axes>, array([<matplotlib.axes._subplots.AxesSubplot object at 0x208acbd90>, <matplotlib.axes._subplots.AxesSubplot object at 0x208d43dd0>, <matplotlib.axes._subplots.AxesSubplot object at 0x208e74b50>, <matplotlib.axes._subplots.AxesSubplot object at 0x1d07219d0>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ced879d0>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cfac8690>, <matplotlib.axes._subplots.AxesSubplot object at 0x208c66910>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cec09f90>, <matplotlib.axes._subplots.AxesSubplot object at 0x2089a7b10>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ce8144d0>], dtype=object))
<Figure size 576x57.6 with 0 Axes>
# MP Parameters
nb_dico = 20  # number of dictionary atoms
width = 9  # atom side length in pixels
dico_size = (width, width)
l0 = 20  # active coefficients per image (l0 sparseness)
seed = 42
# Learning Parameters
eta = .05  # dictionary learning rate
nb_epoch = 500
# reload the raw training/test sets (same call as the preview cell above)
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
# local contrast normalization; only the normalized set is kept
N_TrSet, _, _, _ = LocalContrastNormalization(TrSet)
# filter the normalized images with a custom profile (sigma=0.25, start_R=15)
Filtered_L_TrSet = FilterInputData(
    N_TrSet, sigma=0.25, style='Custom', start_R=15)
to_display = Filtered_L_TrSet[0][0, 0:10, :, :, :]
DisplayDico(to_display)
# Gaussian mask, one per atom — presumably to spatially localize the atoms;
# TODO confirm against GenerateMask's documentation
mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
DisplayDico(mask)
(<Figure size 576x28.8 with 20 Axes>, array([<matplotlib.axes._subplots.AxesSubplot object at 0x208c71ad0>, <matplotlib.axes._subplots.AxesSubplot object at 0x208e8c710>, <matplotlib.axes._subplots.AxesSubplot object at 0x208b49f10>, <matplotlib.axes._subplots.AxesSubplot object at 0x208a53750>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cdca0f50>, <matplotlib.axes._subplots.AxesSubplot object at 0x208868790>, <matplotlib.axes._subplots.AxesSubplot object at 0x208862410>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ceb7e7d0>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ceaac490>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ceb3f850>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cea3ff90>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cdddb850>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cdd97cd0>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cdcc9890>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cdd48d10>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ce4fe8d0>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ce736d50>, <matplotlib.axes._subplots.AxesSubplot object at 0x1ce873910>, <matplotlib.axes._subplots.AxesSubplot object at 0x1cea71d90>, <matplotlib.axes._subplots.AxesSubplot object at 0x208ba6950>], dtype=object))
<Figure size 576x57.6 with 0 Axes>
<Figure size 576x28.8 with 0 Axes>
from CHAMP.CHAMP_Layer import CHAMP_Layer
from CHAMP.DataTools import SaveNetwork, LoadNetwork
# First-layer CHAMP dictionary WITHOUT homeostasis: load from cache when
# available, otherwise train and cache the result.
fname = 'cache_dir_CNN/CHAMP_low_None.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
except Exception:
    # Any failure to load (missing or corrupt cache) triggers a retrain.
    # `except Exception` replaces a bare `except:` so KeyboardInterrupt
    # and SystemExit are no longer swallowed.
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=2)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error', 'histo'])
DisplayWhere(L1_mask.where)
(<Figure size 576x216 with 20 Axes>, <matplotlib.axes._subplots.AxesSubplot at 0x2200e3450>)
<Figure size 576x28.8 with 0 Axes>
# First-layer CHAMP dictionary WITH HAP homeostasis (eta_homeo > 0):
# load from cache when available, otherwise train and cache the result.
fname = 'cache_dir_CNN/CHAMP_low_HAP.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
except Exception:
    # Any failure to load (missing or corrupt cache) triggers a retrain.
    # `except Exception` replaces a bare `except:` so KeyboardInterrupt
    # and SystemExit are no longer swallowed.
    # Learning Parameters
    eta_homeo = 0.0025  # homeostasis learning rate
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=1)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error'])
DisplayConvergenceCHAMP(L1_mask, to_display=['histo'])
DisplayWhere(L1_mask.where)
(<Figure size 576x216 with 20 Axes>, <matplotlib.axes._subplots.AxesSubplot at 0x22060da10>)
<Figure size 576x28.8 with 0 Axes>
from CHAMP.DataTools import Rebuilt
import torch
# Reconstruct the images from the layer's sparse code and show the first ten.
sparse_code = torch.FloatTensor(L1_mask.code)
rebuilt_image = Rebuilt(sparse_code, L1_mask.dictionary)
DisplayDico(rebuilt_image[:10, :, :, :]);
<Figure size 576x57.6 with 0 Axes>
We train higher-level feature vectors by forcing the network to:
# Higher-level CHAMP dictionary WITHOUT homeostasis: larger atoms (19x19),
# more atoms (60), stronger sparseness (l0=5). Load from cache when
# available, otherwise train and cache the result.
fname = 'cache_dir_CNN/CHAMP_high_None.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
except Exception:
    # Any failure to load (missing or corrupt cache) triggers a retrain.
    # `except Exception` replaces a bare `except:` so KeyboardInterrupt
    # and SystemExit are no longer swallowed.
    nb_dico = 60
    width = 19
    dico_size = (width, width)
    l0 = 5
    mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
    # Learning Parameters
    eta_homeo = 0.0  # homeostasis disabled
    eta = .05
    nb_epoch = 500
    # learn
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=0)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error'])
DisplayConvergenceCHAMP(L1_mask, to_display=['histo'])
DisplayWhere(L1_mask.where);
<Figure size 576x9.6 with 0 Axes>
# Higher-level CHAMP dictionary WITH HAP homeostasis (eta_homeo > 0); same
# geometry as the no-homeostasis run above. Load from cache when available,
# otherwise train and cache the result.
fname = 'cache_dir_CNN/CHAMP_high_HAP.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
except Exception:
    # Any failure to load (missing or corrupt cache) triggers a retrain.
    # `except Exception` replaces a bare `except:` so KeyboardInterrupt
    # and SystemExit are no longer swallowed.
    nb_dico = 60
    width = 19
    dico_size = (width, width)
    l0 = 5
    mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
    # Learning Parameters
    eta_homeo = 0.0025  # homeostasis learning rate
    eta = .05
    nb_epoch = 500
    # learn
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=0)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error'])
DisplayConvergenceCHAMP(L1_mask, to_display=['histo'])
DisplayWhere(L1_mask.where);
<Figure size 576x9.6 with 0 Axes>
%run model.py {tag} 0
tag = HULK n_jobs = 0
<Figure size 432x288 with 0 Axes>
%run model.py 35
tag = 35 n_jobs = 0
%load_ext watermark
%watermark -i -h -m -v -p numpy,matplotlib,shl_scripts
2019-09-17T09:19:27+02:00 CPython 3.7.4 IPython 7.8.0 numpy 1.17.2 matplotlib 3.1.1 shl_scripts 20171221 compiler : Clang 10.0.1 (clang-1001.0.46.4) system : Darwin release : 18.7.0 machine : x86_64 processor : i386 CPU cores : 36 interpreter: 64bit host name : fortytwo
!jupyter nbconvert --to html_embed Annex.ipynb --output=index.html
[NbConvertApp] Converting notebook Annex.ipynb to html_embed /usr/local/lib/python3.7/site-packages/nbconvert/filters/datatypefilter.py:41: UserWarning: Your element with mimetype(s) dict_keys(['image/pdf']) is not able to be represented. mimetypes=output.keys()) [NbConvertApp] Writing 5273241 bytes to index.html
#!jupyter-nbconvert --template report --to pdf Annex.ipynb
#!pandoc Annex.html -o Annex.pdf
#!/Applications/Chromium.app/Contents/MacOS/Chromium --headless --disable-gpu --print-to-pdf=Annex.pdf file:///tmp/Annex.html
#!zip Annex.zip Annex.html
!git status
On branch master Your branch is up to date with 'origin/master'. Changes not staged for commit: (use "git add <file>..." to update what will be committed) (use "git restore <file>..." to discard changes in working directory) modified: Annex.ipynb modified: figure_CNN.pdf modified: figure_CNN.png modified: figure_HAP.pdf modified: figure_HEH.pdf modified: figure_map.pdf modified: index.html no changes added to commit (use "git add" and/or "git commit -a")
!git pull
Already up to date.
!git commit -am' {tag} : re-running notebooks'
[master a87f07f] 35 : re-running notebooks 7 files changed, 120 insertions(+), 1580 deletions(-)
!git push
Enumerating objects: 17, done. Counting objects: 100% (17/17), done. Delta compression using up to 36 threads Compressing objects: 100% (9/9), done. Writing objects: 100% (9/9), 65.11 KiB | 749.00 KiB/s, done. Total 9 (delta 8), reused 0 (delta 0) remote: Resolving deltas: 100% (8/8), completed with 8 local objects. To https://github.com/SpikeAI/HULK ad062e5..a87f07f master -> master