The following script was used to generate the graphs of Figure 6 of the article "Behavioral Diversity Generation in Autonomous Exploration Through Reuse of Past Experience" by Fabien C. Y. Benureau and Pierre-Yves Oudeyer.
The full code is available and is distributed under the Open Science License. For any questions, remarks or difficulties running this code, contact fabien.benureau@gmail.com.
import os
import copy
import math
import numpy as np
import environments
from environments import tools
import experiments
import dotdot
import graphs
from fig5_cluster import planar
# Route Bokeh plot output to the notebook for inline display.
graphs.output_notebook()
Loading results and finding the indices of the worst experiments for the reuse and no-reuse cases (based on coverage at t=5000).
# Load the experiment configurations of the planar-arm cluster and their
# coverage ('tcov') results for all three conditions (no-reuse, source, target).
expcfgs = planar()
results = experiments.load_results(expcfgs, 'tcov', mask=(True, True, True))
for nor_res, src_res, tgt_res in results:
    # Index of the repetition with the lowest final coverage (last tick,
    # t=5000) for the no-reuse and the reuse-target conditions.
    # NOTE(review): if `results` yields several triples, only the last
    # iteration's indices survive this loop — presumably there is exactly
    # one triple here; verify.
    min_nor = np.argmin(nor_res['tick_avgs'][-1])
    min_tgt = np.argmin(tgt_res['tick_avgs'][-1])
Distribution of the worst no-reuse experiment + cardinal and intercardinal extremum postures
# Load the full exploration data of the worst no-reuse repetition.
data_min_nor = experiments.load_explorations(expcfgs, rep=min_nor)
def spread(d):
    """Plot the sensory spread of exploration `d`, with extremum postures overlaid.

    Builds the spread figure of the reached sensory effects, then draws the
    cardinal/intercardinal extremum arm postures on top of it.
    """
    env = environments.Environment.create(d.job.jobcfg.exploration.env)
    spread_fig = graphs.spread(d['s_channels'], s_vectors=d['s_vectors'],
                               e_radius=1.0, e_alpha=0.5,
                               title='{}'.format(d.job.jobcfg.key))
    return graphs.posture_extrema(env, d['explorations'], fig=spread_fig,
                                  alpha=1.0, radius_factor=0.75, line_factor=1.0)
# Plot the worst no-reuse run: the full spread figure next to a zoomed view
# of the upper-left region of the sensory space.
for nor_data, src_data, tgt_data in data_min_nor:
    # zoom code
    env_cfg = nor_data.job.jobcfg.exploration.env
    env = environments.Environment.create(env_cfg)
    # Zoom on x in [0, 0.2], y in [0.5, 1]; the plot width is scaled to keep
    # the same aspect ratio as the 450px-high full figure.
    zoom = graphs.posture_extrema(env, nor_data['explorations'],
                                  x_range=[0, 0.2], y_range=[0.5, 1],
                                  plot_width=int(80+ 0.4*(450-80)), plot_height=450,
                                  alpha=1.0, radius_factor=0.5, line_factor=1.0)
    graphs.show([[spread(nor_data), zoom]])
Distribution of the worst reuse experiment + cardinal and intercardinal extremum postures
# Load the worst reuse repetition and plot the spread figures of its source
# and target explorations side by side.
data_min_tgt = experiments.load_explorations(expcfgs, rep=min_tgt)
figs = []
for nor_data, src_data, tgt_data in data_min_tgt:
    figs.append(spread(src_data))
    figs.append(spread(tgt_data))
fig = graphs.show([figs])
Showing motor commands reused by the worst reuse experiment.
def arm_width(env, m_signal):
    """How much lateral space a posture occupies (for screen placement).

    Executes `m_signal` on `env` — for the side effect of updating
    `env.posture` — and returns `(min, max)` of the posture's lateral
    coordinates.

    NOTE(review): the lateral axis is the *second* element of each
    `env.posture` pair (the original code swapped xs/ys before taking
    min/max) — presumably the posture stores coordinates as (y, x);
    confirm against the environment implementation.
    """
    # Return value unused: only the refreshed env.posture matters here.
    env.execute(m_signal)
    _, lateral = zip(*env.posture)
    return min(lateral), max(lateral)
import bokeh.models
def arm_examples(env, examples):
    """Display rotated examples of arm postures, laid out left-to-right.

    Each posture is rotated (through its last joint 'j19') so that its
    end-effector direction lies along the x axis, then translated so the
    postures sit side by side; a new row is started when the current one
    is full. The input `examples` list is not modified (a deep copy is
    mutated instead).
    """
    fig = None
    x_offset = 0.025
    y_offset = 0.0
    # Deep-copy because the m_signals are mutated (rotation) below.
    examples = copy.deepcopy(examples)
    for e in examples:
        # End-effector position of the unrotated posture.
        x = env.execute(e[0]['m_signal'])['s_signal']['x']
        y = env.execute(e[0]['m_signal'])['s_signal']['y']
        # Rotate so the end-effector lies on the x axis, and wrap the joint
        # angle back into [-180, 180).
        e[0]['m_signal']['j19'] -= math.degrees(math.atan2(y, x))
        e[0]['m_signal']['j19'] = ((e[0]['m_signal']['j19']+180)%(360))-180
        x_min, x_max = arm_width(env, e[0]['m_signal'])
        # Shift right so the posture's leftmost point clears the previous one.
        x_offset += max(0, -x_min) + 0.05
        # plot_width/plot_height must be integers (Bokeh rejects floats;
        # cf. the int(...) cast used for the zoom figure above) — hence //.
        fig = graphs.posture_explorations(env, [e], fig=fig, x_T=x_offset, y_T=y_offset,
                                          plot_width=3800//4, plot_height=3200//4, grid=False,
                                          x_range=[0.0, 3.8], y_range=[-2.2, 1.0],
                                          alpha=0.50, radius_factor=1.0)
        # Strip most chrome: keep only the y-axis labels and ticks.
        fig.outline_line_color = None
        fig.axis.major_tick_out = 4
        fig.axis.minor_tick_out = 0
        fig.xgrid.grid_line_color = None
        fig.xaxis.axis_line_color = None
        fig.xaxis.major_tick_line_color = None
        fig.xaxis.major_label_text_color = None
        fig.yaxis.major_label_text_font_size="14pt"
        fig.yaxis[0].ticker=bokeh.models.FixedTicker(ticks=[1, 0, -1, -2, -3])
        # Advance past the posture's rightmost point; wrap to a new row
        # when the current one is full.
        x_offset += max(0, x_max)
        if x_offset > 3.5:
            y_offset -= 1.0
            x_offset = 0.025
    graphs.show(fig)
def _reused_commands(explorations):
expls = []
for expl in explorations:
if 'ReuseExplorer' == expl[0]['from']:
expls.append(expl)
return expls
# Draw the motor commands that the worst reuse experiment reused, executed on
# a KinArmEuclidean environment (presumably to get postures in euclidean
# coordinates — verify against environments.envs).
for nor_data, src_data, tgt_data in data_min_tgt:
    env_cfg = tgt_data.job.jobcfg.exploration.env
    env_cfg.classname = 'environments.envs.KinArmEuclidean'
    env = environments.Environment.create(env_cfg)
    reused_commands = _reused_commands(tgt_data['explorations'])#[:50]
    arm_examples(env, reused_commands)
The cluster provenance code examines all the exploration data files of this experiment to check and compare their embedded provenance data.
import provenance
# Examine the provenance data embedded in every exploration data file of the
# experiment, checking and comparing them for consistency.
prov_data = provenance.cluster(planar()) # this may take a minute or two.
print(prov_data.message())