diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5c0030ad3912a4450bccd4ed914ed86b57b24418
--- /dev/null
+++ b/.github/workflows/run-tests.yml
@@ -0,0 +1,47 @@
+name: run-tests
+
+on: [push, pull_request]
+
+jobs:
+  latest:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ["3.5", "3.6", "3.7", "3.8"]
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v1
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r pip-requirements.txt
+    - name: run tests
+      run: |
+        ./run-tests.py
+
+  other-os:
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [macos-latest, windows-latest]
+        python-version: [3.x]
+
+# have to copy steps from above, as anchors are currently
+# not supported by github workflow (Jan 2020).
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v1
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r pip-requirements.txt
+    - name: run tests
+      run: |
+        ./run-tests.py
diff --git a/.gitignore b/.gitignore
index ff36a4e825ed8ba546b87b29ef5da02c595c0b05..c624b909d433d9a774d1caa70764b1f5be3c9abc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,4 @@
 # auto generated by setup.py
-/examples/_openPMDdata
 /_examplepictures
 
 # Byte-compiled / optimized / DLL files
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 9d1a63d1ce236d0a2ecad6fa7a56b40eec655ea8..0000000000000000000000000000000000000000
--- a/.travis.yml
+++ /dev/null
@@ -1,88 +0,0 @@
-# Check this file before committing a new version:
-# http://lint.travis-ci.org/
-
-language: python
-sudo: false
-dist: xenial
-
-notifications:
-  email:
-    on_success: change
-    on_failure: change
-
-env:
-  - MATPLOTLIB_V=* NUMPY_V=* SCIPY_V=* CYTHON_V=* PYGMENTS_V="=*"
-#  - MATPLOTLIB_V=2.0.2 NUMPY_V=* SCIPY_V=* CYTHON_V=*
-
-jobs:
-  fast_finish: true
-#  allow_failures:
-    # have this allowed as there is a bug in matplotlib 2.1.0
-#    - env: MATPLOTLIB_V=* NUMPY_V=* SCIPY_V=* CYTHON_V=*
-  include:
-    - stage: Fast test
-      python: 3.5
-      virtualenv:
-        system_site_packages: true
-      before_install: skip
-      install:
-         - pip install 'matplotlib<3.1'
-         - pip install -r pip-requirements.txt
-      before_script: skip
-      script: ./run-tests.py --fast
-
-    - stage: Tests
-      python: 2.7
-      env: MATPLOTLIB_V=2.2.3
-    - python: 3.5
-      env: MATPLOTLIB_V=3.0
-    - python: 3.6
-    - python: 3.7
-
-    # Ubuntu 14.04.5 LTS
-    # python 2.6 is explicitly NOT supported!
-    - name: "Ubuntu 14.04.5 LTS - Python 3.4"
-      python: 3.4
-      env: MATPLOTLIB_V=1.3.1 NUMPY_V=1.8.1 SCIPY_V=0.13.3 CYTHON_V=0.20.1 EXTRAPACK="libgfortran<3" PYGMENTS_V="<2.4"
-
-    # Ubuntu 16.04.3 LTS
-    - name: "Ubuntu 16.04.3 LTS - Python 2.7"
-      python: 2.7
-      env: MATPLOTLIB_V=1.5.1 NUMPY_V=1.11.0 SCIPY_V=0.17.0 CYTHON_V=0.23.4
-    - name: "Ubuntu 16.04.3 LTS - Python 3.5"
-      python: 3.5
-      env: MATPLOTLIB_V=1.5.1 NUMPY_V=1.11.0 SCIPY_V=0.17.0 CYTHON_V=0.23.4
-
-    # Ubuntu 18.04 LTS
-    - name: "Ubuntu 18.04 LTS - Python 2.7"
-      python: 2.7
-      env: MATPLOTLIB_V=2.1.1 NUMPY_V=1.13.3 SCIPY_V=0.19.1 CYTHON_V=0.26.1
-    - name: "Ubuntu 18.04 LTS - Python 3.6"
-      python: 3.6
-      env: MATPLOTLIB_V=2.1.1 NUMPY_V=1.13.3 SCIPY_V=0.19.1 CYTHON_V=0.26.1
-
-# Install conda
-before_install:
-  - wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
-  - chmod +x miniconda.sh
-  - ./miniconda.sh -b -p $HOME/miniconda
-  - export PATH=$HOME/miniconda/bin:$PATH
-  - conda update --yes conda;
-  - conda create -yn pyenv python=$TRAVIS_PYTHON_VERSION atlas numpy=$NUMPY_V scipy=$SCIPY_V matplotlib=$MATPLOTLIB_V nose cython=$CYTHON_V h5py numexpr sphinx pygments$PYGMENTS_V $EXTRAPACK
-  - source activate pyenv
-
-install:
-  - pip install -r pip-requirements.txt
-  - python setup.py install
-
-before_script:
-  - uname -a
-  - free -m
-  - df -h
-  - ulimit -a
-  - python -V
-  - cython --version
-
-# run tests
-script:
-  - ./run-tests.py
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d9e8c4abd684f2c1b2f3c4b4ebf154049470b8e6..be321d29cef3b8450488352a44e17f9d2a60b6c1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,9 +2,33 @@
 
 ## current master
 
-* New convenience method `Field.copy`
+**Highlights**
+
 * Parallelized implementation of `Field.map_coordinates`
+* Added support for reading files written by the fbpic v0.13.0 code (https://github.com/fbpic). The fields can be accessed by `Er` and `Etheta`, which have been introduced to the fbpic data reader. Particles are saved in Cartesian coordinates, hence the interface does not change there.
+* Reimplementation of `Field.fft`, see below.
+
+**Incompatible adjustments to previous version**
+
 * Reimplementation of `Field.fft`. This changes the phases of Fourier transforms in a way to make it more consistent. However, if your code depends on the phases, `Field.fft()` now has a parameter `old_behaviour` that can be used to switch back to the old behaviour.
+* Indexing a field by a number (integer or float) will now remove the corresponding axis altogether, instead of leaving behind a length-1 axis.
+A new class `KeepDim` was introduced through which the old behaviour can still be used.
+Behaviour of PostPic before this change:
+```
+field.shape == (x,y,z)
+field[:, 0.0, :].shape == (x,1,z)
+```
+Using the new class `KeepDim`, it is possible to retain that behaviour in the new version:
+```
+field.shape == (x,y,z)
+field[:, 0.0, :].shape == (x, z)
+field[:, KeepDim(0.0), :].shape == (x,1,z)
+```
+
+**Other improvements and new features**
+
+* New convenience method `Field.copy`
+
 
 ## v0.4
 2019-01-14
diff --git a/README.md b/README.md
index 8ccbeda583042f27b61ebab5a99958e903ea7462..86c52aafec7e98ad1cd0e66d439ffda06b8654c6 100644
--- a/README.md
+++ b/README.md
@@ -27,19 +27,26 @@ Additionally postpic can plot and label your plot automatically. This makes it e
 Installation
 ------------
 
-Postpic can be used with python2 and python3. However the usage of python3 is recommended.
+Postpic can be used with `python2` and `python3`. However the usage of `python3` is recommended.
 
-**Users** should install the latest version directly from github using pip (python package manager):
+**Users** should install the latest version directly from github using `pip` (python package manager):
 
 `pip install --user git+https://github.com/skuschel/postpic.git`
 
-The latest *release* is also available ib the python package index [pypi](https://pypi.python.org/pypi/postpic/), thus it can be installed by using the python package manager pip:
+Please note that, depending on your system python setup, `pip` may default to `python2`.
+In that case you will need to use `pip3` instead to make sure that postpic is installed for `python3`:
 
-`pip install postpic`
+`pip3 install --user git+https://github.com/skuschel/postpic.git`
+
+The latest *release* is also available in the python package index [pypi](https://pypi.python.org/pypi/postpic/), thus it can be installed by using the python package manager pip:
+
+`pip install --user postpic`
+
+In both cases the `--user` option makes sure that the package can be installed without `root` privileges by installing it into your user profile.
 
 **Developers** should clone the git repository (or their fork of it) and install it using
 
-`python setup.py develop --user`
+`pip install --user -e .`
 
 This command will link the current folder to global python scope, such that changing the code will immediately update the installed package.
 
diff --git a/examples/.gitignore b/examples/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..53403352f7d9b294a4c2bc36885a04793a5aacc4
--- /dev/null
+++ b/examples/.gitignore
@@ -0,0 +1,2 @@
+kspace-test-2d/
+_openPMDdata/
diff --git a/examples/kspace-test-2d.py b/examples/kspace-test-2d.py
new file mode 100755
index 0000000000000000000000000000000000000000..da73acc1fcdf51d0b29855046ef881692657722d
--- /dev/null
+++ b/examples/kspace-test-2d.py
@@ -0,0 +1,336 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# In[1]:
+
+import sys
+
+
+def download(url, file):
+    import urllib3
+    import shutil
+    import os
+    if os.path.isfile(file):
+        return True
+    try:
+        urllib3.disable_warnings()
+        http = urllib3.PoolManager()
+        print('downloading {:} ...'.format(file))
+        with http.request('GET', url, preload_content=False) as r, open(file, 'wb') as out_file:
+            shutil.copyfileobj(r, out_file)
+        success = True
+    except urllib3.exceptions.MaxRetryError:
+        success = False
+    return success
+
+def main():
+    import os
+    import os.path as osp
+    datadir = 'examples/kspace-test-2d'
+    tarball = osp.join(datadir, 'kspace-test-2d-resources.tar.gz')
+    tarball_url = 'https://blinne.net/files/postpic/kspace-test-2d-resources.tar.gz'
+    if not os.path.exists(datadir):
+        os.mkdir(datadir)
+    s = download(tarball_url, tarball)
+
+    if s:
+        import hashlib
+        chksum = hashlib.sha256(open(tarball, 'rb').read()).hexdigest()
+        if chksum != "54bbeae19e1f412ddd475f565c2193e92922ed8a22e7eb3ecb4d73b5cf193b24":
+            os.remove(tarball)
+            s = False
+
+    if not s:
+        print('Failed to Download example data. Skipping this example.')
+        return
+
+    import tarfile
+    tar = tarfile.open(tarball)
+    tar.extractall(datadir)
+
+
+    import matplotlib
+    matplotlib.use('Agg')
+
+    font = {'size'   : 12}
+    matplotlib.rc('font', **font)
+
+    import copy
+    import postpic as pp
+    import numpy as np
+    import pickle
+
+    try:
+        import sdf
+        sdfavail = True
+    except ImportError:
+        sdfavail = False
+
+    # basic constants
+    micro = 1e-6
+    femto = 1e-15
+    c = pp.PhysicalConstants.c
+
+    # known parameters from simulation
+    lam = 0.5 * micro
+    k0 = 2*np.pi/lam
+    f0 = pp.PhysicalConstants.c/lam
+
+    pymajorver = str(sys.version_info[0])
+
+    if sdfavail:
+        pp.chooseCode("EPOCH")
+
+        dump = pp.readDump(osp.join(datadir, '0002.sdf'))
+        plotter = pp.plotting.plottercls(dump, autosave=False)
+        fields = dict()
+        for fc in ['Ey', 'Bz']:
+            fields[fc] = getattr(dump, fc)()
+            fields[fc].saveto(osp.join(datadir, '0002_'+fc+pymajorver), compressed=False)
+        t = dump.time()
+        dt = t/dump.timestep()
+        pickle.dump(dict(t=t, dt=dt), open(osp.join(datadir,'0002_meta'+pymajorver+'.pickle'), 'wb'))
+    else:
+        fields = dict()
+        for fc in ['Ey', 'Bz']:
+            fields[fc] = pp.Field.loadfrom(osp.join(datadir,'0002_{}.npz').format(fc+pymajorver))
+        meta = pickle.load(open(osp.join(datadir,'0002_meta'+pymajorver+'.pickle'), 'rb'))
+        t = meta['t']
+        dt = meta['dt']
+        plotter = pp.plotting.plottercls(None, autosave=False)
+
+    Ey = fields['Ey']
+    # grid spacing from field
+    dx = [ax.grid[1] - ax.grid[0] for ax in Ey.axes]
+
+
+    #w0 = 2*np.pi*f0
+    #w0 = pp.PhysicalConstants.c * k0
+
+    wn = np.pi/dt
+    omega_yee = pp.helper.omega_yee_factory(dx=dx, dt=dt)
+
+    wyee = omega_yee([k0,0])
+    w0 = pp.helper.omega_free([k0,0])
+
+    print('dx', dx)
+    print('t', t)
+    print('dt', dt)
+    print('wn', wn)
+    print('w0', w0)
+    print('wyee', wyee)
+    print('wyee/w0', wyee/w0)
+    print('wyee/wn', wyee/wn)
+    print('lam/dx[0]', lam/dx[0])
+
+    print('cos(1/2 wyee dt)', np.cos(1/2 * wyee * dt))
+
+    vg_yee    = c*np.cos(k0*dx[0]/2.0)/np.sqrt(1-(c*dt/dx[0]*np.sin(k0*dx[0]/2.0))**2)
+    print('vg/c', vg_yee/c)
+
+    r = np.sqrt(1.0 - (pp.PhysicalConstants.c * dt)**2 * (1/dx[0]*np.sin(1/2.0*k0*dx[0]))**2)
+    print('r', r)
+
+
+    # In[2]:
+
+
+
+    omega_yee = pp.helper.omega_yee_factory(dx=Ey.spacing, dt=dt)
+    lin_int_response_omega = pp.helper._linear_interpolation_frequency_response(dt)
+    lin_int_response_k = pp.helper._linear_interpolation_frequency_response_on_k(lin_int_response_omega,
+                                                                                Ey.fft().axes, omega_yee)
+
+    lin_int_response_k_vac = pp.helper._linear_interpolation_frequency_response_on_k(lin_int_response_omega,
+                                                                                Ey.fft().axes, pp.helper.omega_free)
+
+
+    # In[3]:
+
+
+    _=plotter.plotField(Ey[:,0.0])
+
+
+    # In[4]:
+
+
+    kspace = dict()
+    component = "Ey"
+
+
+    # In[5]:
+
+
+    #key = 'default epoch map=yee, omega=vac'
+    key = 'linresponse map=yee, omega=vac'
+    if sdfavail:
+        kspace[key] = abs(dump.kspace_Ey(solver='yee'))
+    else:
+        kspace[key] = abs(pp.helper.kspace(component, fields=dict(Ey=fields['Ey'],
+                                                                Bz=fields['Bz'].fft()/lin_int_response_k),
+                                interpolation='fourier'))
+        #  using the helper function `kspace_epoch_like` would yield same result:
+        # kspace[key] = abs(pp.helper.kspace_epoch_like(component, fields=fields, dt=dt, omega_func=omega_yee))
+    normalisation = 1.0/np.max(kspace[key].matrix)
+    kspace[key] *= normalisation
+    kspace[key].name = r'corrected $\vec{k}$-space, $\omega_0=c|\vec{k}|$'
+    kspace[key].unit = ''
+
+
+    # In[6]:
+
+
+    key = 'simple fft'
+    kspace[key] = abs(Ey.fft()) * normalisation
+    kspace[key].name = r'plain fft'
+    kspace[key].unit = ''
+
+
+    # In[7]:
+
+
+    key = 'fourier'
+    kspace[key] = abs(pp.helper.kspace(component,
+                                    fields=fields,
+                                    interpolation='fourier')
+                    ) * normalisation
+    kspace[key].name = u'naïve $\\vec{k}$-space, $\\omega_0=c|\\vec{k}|$'
+    kspace[key].unit = ''
+
+
+    # In[8]:
+
+
+    key = 'fourier yee'
+    kspace[key] = abs(pp.helper.kspace(component,
+                                    fields=fields, interpolation='fourier',
+                                    omega_func=omega_yee)
+                    ) * normalisation
+    kspace[key].name = u'naïve $\\vec{k}$-space, $\\omega_0=\\omega_\\mathrm{grid}$'
+    kspace[key].unit = ''
+
+
+    # In[9]:
+
+
+    key = 'linresponse map=yee, omega=yee'
+    kspace[key] = abs(pp.helper.kspace(component, fields=dict(Ey=fields['Ey'],
+                                                        Bz=fields['Bz'].fft()/lin_int_response_k),
+                                interpolation='fourier', omega_func=omega_yee)
+                    ) * normalisation
+    kspace[key].name = r'corrected $\vec{k}$-space, $\omega_0=\omega_\mathrm{grid}$'
+    kspace[key].unit = ''
+
+
+    # In[10]:
+
+
+    slices = [slice(360-120, 360+120), slice(120, 121)]
+
+
+    # In[11]:
+
+
+    keys = ['simple fft',
+            'fourier yee',
+            'fourier',
+            'linresponse map=yee, omega=yee',
+            'linresponse map=yee, omega=vac'
+            ]
+    figure2 = plotter.plotFields1d(*[kspace[k][slices] for k in keys],
+                                log10plot=True, ylim=(5e-17, 5))
+    figure2.set_figwidth(8)
+    figure2.set_figheight(6)
+    while figure2.axes[0].texts:
+        figure2.axes[0].texts[-1].remove()
+
+    figure2.axes[0].set_title('')
+    figure2.axes[0].set_ylabel(r'$|E_y(k_x,0,0)|\, [a. u.]$')
+    figure2.axes[0].set_xlabel(r'$k_x\,[m^{-1}]$')
+    figure2.tight_layout()
+    figure2.savefig(osp.join(datadir, 'gaussian-kspace.pdf'))
+
+    print("Integrated ghost peaks")
+    for k in keys:
+        I = kspace[k][:0.0,:].integrate().matrix
+        print(k, I)
+        if k == 'linresponse map=yee, omega=vac':
+            if I < 30000000.:
+                print('linresponse map=yee, omega=vac value is low enough: YES' )
+            else:
+                print('linresponse map=yee, omega=vac value is low enough: NO' )
+                print('Something is WRONG' )
+                sys.exit(1)
+
+
+
+    # In[13]:
+
+
+    if sdfavail:
+        kspace_ey = dump.kspace_Ey(solver='yee')
+    else:
+        kspace_ey = pp.helper.kspace_epoch_like(component, fields=fields, dt=dt, omega_func=omega_yee)
+    complex_ey = kspace_ey.fft()
+    envelope_ey_2d = abs(complex_ey)[:,:].squeeze()
+    try:
+        from skimage.restoration import unwrap_phase
+        phase_ey = complex_ey.replace_data( unwrap_phase(np.angle(complex_ey)) )
+    except ImportError:
+        phase_ey = complex_ey.replace_data( np.angle(complex_ey) )
+    phase_ey_2d = phase_ey[:,:].squeeze()
+
+
+    # In[14]:
+
+
+    ey = complex_ey.real[-0.1e-5:0.2e-5, :]
+    #ey = Ey
+
+    ey.name = r'$E_y$'
+    ey.unit = r'$\frac{\mathrm{V}}{\mathrm{m}}$'
+
+    figure = plotter.plotField2d(ey)
+    figure.set_figwidth(6)
+    figure.axes[0].set_title(r'')#$\Re E_y\ [\frac{\mathrm{V}}{\mathrm{m}}]$')
+    figure.axes[0].set_xlabel(u'$x\\,[µm]$')
+    figure.axes[0].set_ylabel(u'$y\\,[µm]$')
+
+    import matplotlib.ticker as ticker
+    ticks_x = ticker.FuncFormatter(lambda x, pos: '{0:g}'.format(x/1e-6))
+    figure.axes[0].xaxis.set_major_formatter(ticks_x)
+    figure.axes[0].yaxis.set_major_formatter(ticks_x)
+
+    try:
+        figure.axes[0].images[0].colorbar.remove()
+    except AttributeError:
+        pass
+    figure.colorbar(figure.axes[0].images[0], format='%6.0e', pad=0.15, label=r'$\Re E_y\ [\frac{\mathrm{V}}{\mathrm{m}}]$')
+
+    axes2 = figure.axes[0].twinx()
+    axes2.set_ylabel(r'$\operatorname{Arg}(E_y)\, [\pi],\;|E_y|\, [a. u.]$')
+
+    env = abs(complex_ey[-0.1e-5:0.2e-5,0.0])
+    m = np.max(env)
+    env = (env/m*40/np.pi).squeeze()
+    p = phase_ey[-0.1e-5:0.2e-5,0.0].squeeze()/np.pi
+
+    _ = axes2.plot(env.grid, env.matrix, label=r'$|E_y|\, [a. u.]$')
+    _ = axes2.plot(p.grid, p.matrix, label=r'$\operatorname{Arg}(E_y)\, [\pi]$')
+
+    handles, labels = axes2.get_legend_handles_labels()
+    axes2.legend(handles, labels)
+
+    #figure.axes[0].set_title(r'$E_y\,[V/m]$')
+
+    figure.set_figwidth(6)
+    figure.set_figheight(6)
+    figure.tight_layout()
+    figure.savefig(osp.join(datadir, 'gaussian-env-arg.pdf'))
+
+
+    # In[ ]:
+
+
+if __name__=='__main__':
+    main()
diff --git a/postpic/_compat/functions.py b/postpic/_compat/functions.py
index 70ae085ff3ff9dc355329334da352215934ce96f..8fcce244bf02e783d9b67d7a77402979cc367a9d 100644
--- a/postpic/_compat/functions.py
+++ b/postpic/_compat/functions.py
@@ -27,6 +27,11 @@ import scipy as sp
 import scipy.signal as sps
 import collections
 
+try:
+    from collections.abc import Iterable
+except ImportError:
+    from collections import Iterable
+
 
 def np_meshgrid(*args, **kwargs):
     if len(args) == 0:
@@ -50,9 +55,9 @@ def np_moveaxis(*args, **kwargs):
     a, source, destination = args
 
     # twice a quick implementation of numpy.numeric.normalize_axis_tuple
-    if not isinstance(source, collections.Iterable):
+    if not isinstance(source, Iterable):
         source = (source,)
-    if not isinstance(destination, collections.Iterable):
+    if not isinstance(destination, Iterable):
         destination = (destination,)
     source = [s % a.ndim for s in source]
     destination = [d % a.ndim for d in destination]
diff --git a/postpic/_field_calc.py b/postpic/_field_calc.py
index bb2a8016ebb7ea1763c1ff436cb7b80f86059eea..5e097a2e473ab7563e56caaca13671f2c1dd737d 100644
--- a/postpic/_field_calc.py
+++ b/postpic/_field_calc.py
@@ -14,7 +14,7 @@
 # You should have received a copy of the GNU General Public License
 # along with postpic. If not, see <http://www.gnu.org/licenses/>.
 #
-# Stephan Kuschel 2014
+# Stephan Kuschel 2014-2019
 # Alexander Blinne, 2017
 """
 Field related routines.
@@ -51,12 +51,12 @@ class FieldAnalyzer(object):
         pass
 
     # General interface for everything
-    def _createfieldfromdata(self, data, gridkey):
+    def _createfieldfromdata(self, data, gridkey, **kwargs):
         ret = Field(np.float64(data))
-        self.setgridtofield(ret, gridkey)
+        self.setgridtofield(ret, gridkey, **kwargs)
         return ret
 
-    def createfieldfromkey(self, key, gridkey=None):
+    def createfieldfromkey(self, key, gridkey=None, **kwargs):
         '''
         This method creates a Field object from the data identified by "key".
         The Grid is also inferred from that key unless an alternate "gridkey"
@@ -64,27 +64,39 @@ class FieldAnalyzer(object):
         '''
         if gridkey is None:
             gridkey = key
-        ret = self._createfieldfromdata(self.data(key), gridkey)
+        ret = self._createfieldfromdata(self.data(key, **kwargs), gridkey, **kwargs)
         ret.name = key
         return ret
 
-    def getaxisobj(self, gridkey, axis):
+    def getaxisobj(self, gridkey, axis, **kwargs):
         '''
         returns an Axis object for the "axis" and the grid defined by "gridkey".
+
+        **kwargs can be any dictionary overriding from the grid for a specific axis,
+        e.g. {'theta': [0, pi/2, pi, 3/2*pi]}
         '''
-        name = {0: 'x', 1: 'y', 2: 'z'}[helper.axesidentify[axis]]
-        ax = Axis(name=name, unit='m', grid_node=self.gridnode(gridkey, axis))
+        name = {0: 'x', 1: 'y', 2: 'z', 90: 'r', 91: 'theta'}[helper.axesidentify[axis]]
+        grid = kwargs.pop(name, None)
+        if grid is not None:
+            # override with given grid
+            ax = Axis(name=name, unit='m', grid=grid)
+        else:
+            ax = Axis(name=name, unit='m', grid_node=self.gridnode(gridkey, axis))
         return ax
 
-    def setgridtofield(self, field, gridkey):
+    def _defaultaxisorder(self, gridkey):
+        return ('x', 'y', 'z')
+
+    def setgridtofield(self, field, gridkey, **kwargs):
         '''
         add spacial field information to the given field object.
         '''
-        field.setaxisobj('x', self.getaxisobj(gridkey, 'x'))
+        x, y, z = self._defaultaxisorder(gridkey)
+        field.setaxisobj(x, self.getaxisobj(gridkey, x, **kwargs))
         if field.dimensions > 1:
-            field.setaxisobj('y', self.getaxisobj(gridkey, 'y'))
+            field.setaxisobj(y, self.getaxisobj(gridkey, y, **kwargs))
         if field.dimensions > 2:
-            field.setaxisobj('z', self.getaxisobj(gridkey, 'z'))
+            field.setaxisobj(z, self.getaxisobj(gridkey, z, **kwargs))
 
     # --- Always return an object of Field type
     # just to shortcut
@@ -98,6 +110,12 @@ class FieldAnalyzer(object):
     def _Ez(self, **kwargs):
         return np.float64(self.dataE('z', **kwargs))
 
+    def _Er(self, **kwargs):
+        return np.float64(self.dataE('r', **kwargs))
+
+    def _Etheta(self, **kwargs):
+        return np.float64(self.dataE('theta', **kwargs))
+
     def _Bx(self, **kwargs):
         return np.float64(self.dataB('x', **kwargs))
 
@@ -107,14 +125,20 @@ class FieldAnalyzer(object):
     def _Bz(self, **kwargs):
         return np.float64(self.dataB('z', **kwargs))
 
-    def createfieldsfromkeys(self, *keys):
+    def _Br(self, **kwargs):
+        return np.float64(self.dataB('r', **kwargs))
+
+    def _Btheta(self, **kwargs):
+        return np.float64(self.dataB('theta', **kwargs))
+
+    def createfieldsfromkeys(self, *keys, **kwargs):
         for key in keys:
-            yield self.createfieldfromkey(key)
+            yield self.createfieldfromkey(key, **kwargs)
 
     # most common fields listed here nicely
     def Ex(self, **kwargs):
         ret = self._createfieldfromdata(self._Ex(**kwargs),
-                                        self.gridkeyE('x', **kwargs))
+                                        self.gridkeyE('x', **kwargs), **kwargs)
         ret.unit = 'V/m'
         ret.name = 'Ex'
         ret.shortname = 'Ex'
@@ -122,7 +146,7 @@ class FieldAnalyzer(object):
 
     def Ey(self, **kwargs):
         ret = self._createfieldfromdata(self._Ey(**kwargs),
-                                        self.gridkeyE('y', **kwargs))
+                                        self.gridkeyE('y', **kwargs), **kwargs)
         ret.unit = 'V/m'
         ret.name = 'Ey'
         ret.shortname = 'Ey'
@@ -130,15 +154,31 @@ class FieldAnalyzer(object):
 
     def Ez(self, **kwargs):
         ret = self._createfieldfromdata(self._Ez(**kwargs),
-                                        self.gridkeyE('z', **kwargs))
+                                        self.gridkeyE('z', **kwargs), **kwargs)
         ret.unit = 'V/m'
         ret.name = 'Ez'
         ret.shortname = 'Ez'
         return ret
 
+    def Er(self, **kwargs):
+        ret = self._createfieldfromdata(self._Er(**kwargs),
+                                        self.gridkeyE('r', **kwargs), **kwargs)
+        ret.unit = 'V/m'
+        ret.name = 'Er'
+        ret.shortname = 'Er'
+        return ret
+
+    def Etheta(self, **kwargs):
+        ret = self._createfieldfromdata(self._Etheta(**kwargs),
+                                        self.gridkeyE('theta', **kwargs), **kwargs)
+        ret.unit = 'V/m'
+        ret.name = 'Etheta'
+        ret.shortname = 'Etheta'
+        return ret
+
     def Bx(self, **kwargs):
         ret = self._createfieldfromdata(self._Bx(**kwargs),
-                                        self.gridkeyB('x', **kwargs))
+                                        self.gridkeyB('x', **kwargs), **kwargs)
         ret.unit = 'T'
         ret.name = 'Bx'
         ret.shortname = 'Bx'
@@ -146,7 +186,7 @@ class FieldAnalyzer(object):
 
     def By(self, **kwargs):
         ret = self._createfieldfromdata(self._By(**kwargs),
-                                        self.gridkeyB('y', **kwargs))
+                                        self.gridkeyB('y', **kwargs), **kwargs)
         ret.unit = 'T'
         ret.name = 'By'
         ret.shortname = 'By'
@@ -154,12 +194,28 @@ class FieldAnalyzer(object):
 
     def Bz(self, **kwargs):
         ret = self._createfieldfromdata(self._Bz(**kwargs),
-                                        self.gridkeyB('z', **kwargs))
+                                        self.gridkeyB('z', **kwargs), **kwargs)
         ret.unit = 'T'
         ret.name = 'Bz'
         ret.shortname = 'Bz'
         return ret
 
+    def Br(self, **kwargs):
+        ret = self._createfieldfromdata(self._Br(**kwargs),
+                                        self.gridkeyB('r', **kwargs), **kwargs)
+        ret.unit = 'V/m'
+        ret.name = 'Br'
+        ret.shortname = 'Br'
+        return ret
+
+    def Btheta(self, **kwargs):
+        ret = self._createfieldfromdata(self._Btheta(**kwargs),
+                                        self.gridkeyB('theta', **kwargs), **kwargs)
+        ret.unit = 'V/m'
+        ret.name = 'Btheta'
+        ret.shortname = 'Btheta'
+        return ret
+
     # --- spezielle Funktionen
 
     def _kspace(self, component, fields, alignment='auto', solver=None, **kwargs):
diff --git a/postpic/datahandling.py b/postpic/datahandling.py
index fa03aded6217b65125b36397cce427240a3cac94..b0f6a522f03a508e46868cdec78766d9055e05f4 100644
--- a/postpic/datahandling.py
+++ b/postpic/datahandling.py
@@ -46,7 +46,11 @@ from __future__ import absolute_import, division, print_function, unicode_litera
 
 import sys
 
-import collections
+try:
+    from collections.abc import Iterable, Mapping
+except ImportError:
+    from collections import Iterable, Mapping
+
 import copy
 import warnings
 import os
@@ -62,6 +66,7 @@ import numexpr as ne
 from ._compat import tukey, meshgrid, broadcast_to, NDArrayOperatorsMixin
 from . import helper
 from . import io
+from .helper_fft import fft
 
 if sys.version[0] == '2':
     import functools32 as functools
@@ -74,38 +79,6 @@ else:
     from itertools import zip_longest
 
 
-try:
-    import psutil
-    nproc = psutil.cpu_count(logical=False)
-except ImportError:
-    try:
-        nproc = os.cpu_count()
-    except AttributeError:
-        import multiprocessing
-        nproc = multiprocessing.cpu_count()
-
-
-try:
-    # pyfftw is, in most situations, faster than numpys fft,
-    # although pyfftw will benefit from multithreading only on very large arrays
-    # on a 720x240x240 3D transform multithreading still doesn't give a large benefit
-    # benchmarks of a 720x240x240 transform of real data on a Intel(R) Xeon(R) CPU
-    # E5-1620 v4 @ 3.50GHz:
-    # numpy.fft: 3.6 seconds
-    # pyfftw, nproc=4: first transform 2.2s, further transforms 1.8s
-    # pyfftw, nproc=1: first transform 3.4s, further transforms 2.8s
-    # Try to import pyFFTW's numpy_fft interface
-    import pyfftw.interfaces.cache as fftw_cache
-    import pyfftw.interfaces.numpy_fft as fftw
-    fftw_cache.enable()
-    fft = fftw
-    fft_kwargs = dict(planner_effort='FFTW_ESTIMATE', threads=nproc)
-except ImportError:
-    # pyFFTW is not available, just import numpys fft
-    import numpy.fft as fft
-    fft_kwargs = dict()
-
-
 try:
     with warnings.catch_warnings():
         # skimage produces a DeprecationWarning by importing `imp`. We will silence this warning
@@ -116,7 +89,12 @@ except ImportError:
     unwrap_phase = None
 
 
-__all__ = ['Field', 'Axis']
+__all__ = ['KeepDim', 'Field', 'Axis']
+
+
+class KeepDim(object):
+    def __init__(self, value):
+        self.value = value
 
 
 class Axis(object):
@@ -146,6 +124,9 @@ class Axis(object):
             if self._grid_node.ndim != 1:
                 raise ValueError("Passed array grid_node has ndim != 1.")
             if helper.monotonicity(self._grid_node) == 0:
+                if np.isclose(self._grid_node[0], self._grid_node[-1], atol=0):
+                    s = 'Grid_node spacing is zero on axis "{}" at value {}.'
+                    raise ValueError(s.format(self.name, self._grid_node[0]))
                 raise ValueError("Passed array grid_node is not monotonous.")
 
         self._grid = kwargs.pop('grid', None)
@@ -155,12 +136,15 @@ class Axis(object):
             if self._grid.ndim != 1:
                 raise ValueError("Passed array grid has ndim != 1.")
             if helper.monotonicity(self._grid) == 0:
+                if np.isclose(self._grid[0], self._grid[-1], atol=0):
+                    s = 'Grid spacing is zero on axis "{}" at value {}.'
+                    raise ValueError(s.format(self.name, self._grid[0]))
                 raise ValueError("Passed array grid is not monotonous.")
 
         self._extent = kwargs.pop('extent', None)
 
         if self._extent is not None:
-            if not isinstance(self._extent, collections.Iterable) or len(self._extent) != 2:
+            if not isinstance(self._extent, Iterable) or len(self._extent) != 2:
                 raise ValueError("Passed extent is not an iterable of length 2")
 
         self._n = kwargs.pop('n', None)
@@ -413,22 +397,32 @@ class Axis(object):
         Applies some checks and transformations to the object passed
         to __getitem__
         """
+        if isinstance(index, KeepDim):
+            index = index.value
+            keepdim = True
+        else:
+            keepdim = False
+
         if isinstance(index, slice):
             if any(helper.is_non_integer_real_number(x) for x in (index.start, index.stop)):
                 if index.step is not None:
                     raise IndexError('Non-Integer slices must have step == None')
                 return self._extent_to_slice((index.start, index.stop))
             return index
-        else:
-            if helper.is_non_integer_real_number(index):
-                # Indexing to a single position outside the extent
-                # will yield IndexError. Identical behaviour as numpy.ndarray
-                if not self._inside_domain(index):
-                    msg = 'Physical index position {} is outside of the ' \
-                          'extent {} of axis {}'.format(index, self.extent, str(self))
-                    raise IndexError(msg)
-                index = self._find_nearest_index(index)
+
+        if helper.is_non_integer_real_number(index):
+            # Indexing to a single position outside the extent
+            # will yield IndexError. Identical behaviour as numpy.ndarray
+            if not self._inside_domain(index):
+                msg = 'Physical index position {} is outside of the ' \
+                      'extent {} of axis {}'.format(index, self.extent, str(self))
+                raise IndexError(msg)
+            index = self._find_nearest_index(index)
+
+        if keepdim:
             return slice(index, index+1)
+        else:
+            return index
 
     def reversed(self):
         '''
@@ -443,6 +437,10 @@ class Axis(object):
         a slice containing floats or integers or a float or an integer
         """
         sl = self._normalize_slice(key)
+
+        if not isinstance(sl, slice):
+            return self.grid[sl]
+
         if not (sl.step is None or np.abs(sl.step) == 1):
             raise ValueError("slice.step must be 1, -1 or None (but is {})".format(sl.step))
         grid = self.grid[sl]
@@ -476,7 +474,7 @@ def _reducing_numpy_method(method):
         axisiter = axis
         if axisiter is None:
             axisiter = tuple(range(self.dimensions))
-        if not isinstance(axisiter, collections.Iterable):
+        if not isinstance(axisiter, Iterable):
             axisiter = (axis,)
 
         # no `out` argument supplied, we need to figure out the axes of the result
@@ -795,7 +793,7 @@ class Field(NDArrayOperatorsMixin):
                 ats = self.axes_transform_state[:]
                 tao = self.transformed_axes_origins[:]
                 reduceaxis = kwargs.get('axis', 0)
-                if not isinstance(reduceaxis, collections.Iterable):
+                if not isinstance(reduceaxis, Iterable):
                     reduceaxis = (reduceaxis,)
                 for axis in reversed(sorted(set(reduceaxis))):
                     del axes[axis]
@@ -917,6 +915,7 @@ class Field(NDArrayOperatorsMixin):
         new axisobj axisobj.
         '''
         axid = helper.axesidentify[axis]
+        axid = axid % 90
         if not len(axisobj) == self.shape[axid]:
             raise ValueError('Axis object has {:3n} grid points, whereas '
                              'the data matrix has {:3n} on axis {:1n}'
@@ -1106,7 +1105,7 @@ class Field(NDArrayOperatorsMixin):
                              'Please apply np.pad to the matrix by yourself and update the axes'
                              'as you like.')
 
-        if not isinstance(pad_width, collections.Iterable):
+        if not isinstance(pad_width, Iterable):
             pad_width = [pad_width]
 
         if len(pad_width) == 1:
@@ -1121,7 +1120,7 @@ class Field(NDArrayOperatorsMixin):
         padded_axes = []
 
         for i, axis_pad in enumerate(pad_width):
-            if not isinstance(axis_pad, collections.Iterable):
+            if not isinstance(axis_pad, Iterable):
                 axis_pad = [axis_pad, axis_pad]
 
             if len(axis_pad) > 2:
@@ -1183,7 +1182,7 @@ class Field(NDArrayOperatorsMixin):
         # Averaging over neighboring points
         s1[axis] = slice(0, lastpt, 2)
         s2[axis] = slice(1, lastpt, 2)
-        m = (ret.matrix[s1] + ret.matrix[s2]) / 2.0
+        m = (ret.matrix[tuple(s1)] + ret.matrix[tuple(s2)]) / 2.0
         ret._matrix = m
         ret.setaxisobj(axis, ret.axes[axis].half_resolution())
 
@@ -1290,6 +1289,8 @@ class Field(NDArrayOperatorsMixin):
         jacobian_func: callable
             a callable that returns the jacobian of the transform. If this is
             not given, the jacobian is numerically approximated.
+            In a 1D to 1D transform this callable should return just the derivative of the
+            transform wrapped in a 1-element iterable.
 
         **kwargs:
             Keyword arguments captured by `helper.map_coordinates_parallel`:
@@ -1315,6 +1316,12 @@ class Field(NDArrayOperatorsMixin):
                 return 1.0
 
         if preserve_integral:
+            if len(newaxes) != self.dimensions:
+                raise ValueError('Preserving the integral through preserve_integral=True is only '
+                                 'possible for transforms that have the same number of input and '
+                                 'output dimensions. '
+                                 'Please pass preserve_integral=False explicitly to allow such '
+                                 'transforms.')
             if jacobian_determinant_func is None:
                 if jacobian_func is None:
                     jacobian_func = helper.approx_jacobian(transform)
@@ -1425,6 +1432,14 @@ class Field(NDArrayOperatorsMixin):
                 \int_V \mathop{\mathrm{d}x} \mathop{\mathrm{d}y} U(x,y) =
                 \int_{T^{-1}(V)} \mathop{\mathrm{d}r}\mathop{\mathrm{d}\theta}
                 \tilde{U}(r,\theta)\,.
+
+            In a 1D to 1D transform, please make sure that the transform function returns the
+            coordinates in a 1-element list, e.g.
+
+            >>> def T(r):
+            ...    x = 2*r**2
+            ...    return [x]
+
         '''
         return self._map_coordinates(newaxes, transform=transform, complex_mode=complex_mode,
                                      preserve_integral=preserve_integral,
@@ -1477,7 +1492,7 @@ class Field(NDArrayOperatorsMixin):
         if axes is None:
             axes = range(field.dimensions)
 
-        if not isinstance(axes, collections.Iterable):
+        if not isinstance(axes, Iterable):
             axes = (axes, )
 
         if len(axes) != len(set(axes)):
@@ -1532,23 +1547,32 @@ class Field(NDArrayOperatorsMixin):
 
         return field.replace_data(ne.evaluate(expr, local_dict=local_dict, global_dict=None))
 
-    def squeeze(self):
+    def squeeze(self, axis=None):
         '''
         removes axes that have length 1, reducing self.dimensions.
 
         Note, that axis with length 0 will not be removed! `numpy.squeeze` also does not
         remove length=0 directions.
 
+        axis: None or int or tuple of ints, optional
+          Selects a subset of the single-dimensional entries in the shape. If an axis is selected
+          with shape entry greater than one, an error is raised.
+
         Same as `numpy.squeeze`.
         '''
-        ret = copy.copy(self)
-        retained_axes = [i for i in range(len(self.shape)) if len(self.axes[i]) != 1]
+        squeezed_axes = [i for i in range(len(self.shape)) if len(self.axes[i]) == 1]
+        if axis is not None:
+            if not isinstance(axis, Iterable):
+                axis = (axis,)
+            for i in axis:
+                squeezed_axes.remove(i)
 
-        ret.axes = [self.axes[i] for i in retained_axes]
-        ret.axes_transform_state = [self.axes_transform_state[i] for i in retained_axes]
-        ret.transformed_axes_origins = [self.transformed_axes_origins[i] for i in retained_axes]
+        sl = [slice(None)] * len(self.shape)
+        for i in squeezed_axes:
+            sl[i] = 0
+
+        ret = self[sl]
 
-        ret._matrix = np.squeeze(ret.matrix)
         assert tuple(len(ax) for ax in ret.axes) == ret.shape
         return ret
 
@@ -1772,7 +1796,7 @@ class Field(NDArrayOperatorsMixin):
         if axes is None:
             axes = range(self.dimensions)
 
-        if not isinstance(axes, collections.Iterable):
+        if not isinstance(axes, Iterable):
             axes = (axes,)
 
         if method == 'constant':
@@ -1815,7 +1839,7 @@ class Field(NDArrayOperatorsMixin):
         index2 = [slice(None) for _ in range(self.dimensions)]
         index2[axis] = slice(0, -1)
 
-        deriv = (self.matrix[index1] - self.matrix[index2])/oldax.spacing
+        deriv = (self.matrix[tuple(index1)] - self.matrix[tuple(index2)])/oldax.spacing
 
         return Field(deriv, name=self.name + "'", unit=self.unit, axes=axes)
 
@@ -1879,7 +1903,7 @@ class Field(NDArrayOperatorsMixin):
         if axes is None:
             axes = range(self.dimensions)
 
-        if not isinstance(axes, collections.Iterable):
+        if not isinstance(axes, Iterable):
             axes = (axes,)
 
         pad = [0] * self.dimensions
@@ -1902,7 +1926,7 @@ class Field(NDArrayOperatorsMixin):
             axes = range(self.dimensions)
 
         # If axes is not a tuple, make it a one-tuple
-        if not isinstance(axes, collections.Iterable):
+        if not isinstance(axes, Iterable):
             axes = (axes,)
 
         dx = {i: self.axes[i].spacing for i in axes}
@@ -1948,7 +1972,7 @@ class Field(NDArrayOperatorsMixin):
             axes = range(self.dimensions)
 
         # If axes is not a tuple, make it a one-tuple
-        if not isinstance(axes, collections.Iterable):
+        if not isinstance(axes, Iterable):
             axes = (axes,)
 
         if exponential_signs not in ['spatial', 'temporal']:
@@ -1999,12 +2023,7 @@ class Field(NDArrayOperatorsMixin):
         # normalization factor ensuring Parseval's Theorem
         fftnorm = np.sqrt(V/Vk)
 
-        # compile fft arguments, starting from default arguments `fft_kwargs` ...
-        my_fft_args = fft_kwargs.copy()
-        # ... and adding the user supplied `kwargs`
-        my_fft_args.update(kwargs)
-        # ... and also norm = 'ortho'
-        my_fft_args['norm'] = 'ortho'
+        my_fft_args = dict(norm='ortho')
 
         # Workaround for missing `fft` argument `norm='ortho'`
         from pkg_resources import parse_version
@@ -2076,8 +2095,8 @@ class Field(NDArrayOperatorsMixin):
         It may be a list of the desired transform states for all the axes or a dictionary
         indicating the desired transform states of specific axes.
         """
-        if not isinstance(transform_states, collections.Mapping):
-            if not isinstance(transform_states, collections.Iterable):
+        if not isinstance(transform_states, Mapping):
+            if not isinstance(transform_states, Iterable):
                 transform_states = [transform_states] * self.dimensions
             transform_states = dict(enumerate(transform_states))
 
@@ -2118,15 +2137,18 @@ class Field(NDArrayOperatorsMixin):
 
         return ret
 
-    def _shift_grid_by_fourier(self, dx):
+    def _shift_grid_by_fourier(self, dx, skip_fft=False):
         axes = sorted(dx.keys())
 
         # transform to conjugate space, shift grid origin, transform back
         # all necessary phases are added by `fft()` method
-        ret = self.fft(axes)
+        ret = self
+        if not skip_fft:
+            ret = ret.fft(axes)
         for i in dx.keys():
             ret.transformed_axes_origins[i] += dx[i]
-        ret = ret.fft(axes)
+        if not skip_fft:
+            ret = ret.fft(axes)
 
         return ret
 
@@ -2169,7 +2191,7 @@ class Field(NDArrayOperatorsMixin):
         if interpolation not in methods.keys():
             raise ValueError("Requested method {} is not supported".format(interpolation))
 
-        if not isinstance(dx, collections.Mapping):
+        if not isinstance(dx, Mapping):
             dx = dict(enumerate(dx))
 
         dx = {helper.axesidentify[i]: v for i, v in dx.items()}
@@ -2276,32 +2298,39 @@ class Field(NDArrayOperatorsMixin):
         return [ax._extent_to_slice(ex) for ax, ex in zip(self.axes, extent)]
 
     def _normalize_slices(self, key):
-        if not isinstance(key, collections.Iterable):
+        if not isinstance(key, Iterable):
             key = (key,)
         if len(key) != self.dimensions:
             raise IndexError("{}D Field requires a {}-tuple of slices as index"
                              "".format(self.dimensions, self.dimensions))
 
-        return [ax._normalize_slice(sl) for ax, sl in zip(self.axes, key)]
+        return tuple(ax._normalize_slice(sl) for ax, sl in zip(self.axes, key))
 
     # Operator overloading
     def __getitem__(self, key):
         old_shape = self.shape
 
-        key = self._normalize_slices(key)
-        field = copy.copy(self)
-        field._matrix = field.matrix[key]
-        for i, sl in enumerate(key):
-            field.setaxisobj(i, field.axes[i][sl])
+        slices = self._normalize_slices(key)
+
+        retained_axes = list(range(len(self.shape)))
+        new_axes = []
+        for i, sl in enumerate(slices):
+            ax = self.axes[i][sl]
+            if isinstance(ax, Axis):
+                new_axes.append(ax)
+            else:
+                retained_axes.remove(i)
 
-        new_shape = field.shape
+        ret = copy.copy(self)
+        ret._matrix = ret.matrix[slices]
 
-        # This info is invalidated
-        for i, (o, n) in enumerate(zip(old_shape, new_shape)):
-            if o != n:
-                field.transformed_axes_origins[i] = None
+        ret.axes = [ret.axes[i] for i in retained_axes]
+        for i, ax in enumerate(new_axes):
+            ret.setaxisobj(i, ax)
+        ret.axes_transform_state = [ret.axes_transform_state[i] for i in retained_axes]
+        ret.transformed_axes_origins = [ret.transformed_axes_origins[i] for i in retained_axes]
 
-        return field
+        return ret
 
     def __setitem__(self, key, other):
         key = self._normalize_slices(key)
diff --git a/postpic/datareader/__init__.py b/postpic/datareader/__init__.py
index 86e3a46ce88c674140040a2c053dfa63c47acfca..49c8229007632bc29fb5879b1c1927b19c0318a3 100644
--- a/postpic/datareader/__init__.py
+++ b/postpic/datareader/__init__.py
@@ -157,9 +157,17 @@ def chooseCode(code):
         Possible options are:
           - "DUMMY": dummy class creating fake data.
           - "EPOCH": .sdf files written by EPOCH1D, EPOCH2D or EPOCH3D.
-          - "openPMD": .h5 files written in openPMD Standard
-          - "piconGPU": same as "openPMD"
+            ( https://cfsa-pmw.warwick.ac.uk/EPOCH/epoch )
+          - "openPMD": .h5 files written in openPMD Standard.
+            ( https://github.com/openPMD/ )
+          - "fbpic": .h5 files written by fbpic ( https://github.com/fbpic/fbpic ).
+            This is the openPMD reader, but includes the conversion from azimuthal modes
+            to a cylindrical grid.
+          - "piconGPU": same as "openPMD".
+            This reader can be used for piconGPU written h5 files.
+            ( https://github.com/ComputationalRadiationPhysics/picongpu )
           - "VSIM": .hdf5 files written by VSim.
+            Currently not working. Needs updating.
     '''
     if code.lower() in ['epoch', 'epoch1d', 'epoch2d', 'epoch3d']:
         from .epochsdf import Sdfreader, Visitreader
@@ -169,6 +177,10 @@ def chooseCode(code):
         from .openPMDh5 import OpenPMDreader, FileSeries
         setdumpreadercls(OpenPMDreader)
         setsimreadercls(FileSeries)
+    elif code.lower() in ['fbpic']:
+        from .openPMDh5 import FbpicReader, FbpicFileSeries
+        setdumpreadercls(FbpicReader)
+        setsimreadercls(FbpicFileSeries)
     elif code.lower() in ['vsim']:
         raise Exception('VSim reader requires update due to the interface change in '
                         'https://github.com/skuschel/postpic/commit/'
@@ -181,4 +193,4 @@ def chooseCode(code):
         setdumpreadercls(Dummyreader)
         setsimreadercls(Dummysim)
     else:
-        raise TypeError('Code "' + str(code) + '" not recognized.')
+        raise TypeError('Code "{}" not recognized.'.format(code))
diff --git a/postpic/datareader/datareader.py b/postpic/datareader/datareader.py
index 0d7884609a4859d08d3236c44ad5d52dfa745c4c..5dfb11980a9a45c62dcb96b8cc9d2f37861d0119 100644
--- a/postpic/datareader/datareader.py
+++ b/postpic/datareader/datareader.py
@@ -21,7 +21,12 @@ from __future__ import absolute_import, division, print_function, unicode_litera
 from future.utils import with_metaclass
 
 import abc
-import collections
+
+try:
+    from collections.abc import Sequence
+except ImportError:
+    from collections import Sequence
+
 import warnings
 import numpy as np
 from .. import helper
@@ -143,6 +148,9 @@ class Dumpreader_ifc(with_metaclass(abc.ABCMeta, FieldAnalyzer)):
                            offset + self.gridspacing(key, axis) * n,
                            n + 1)
 
+    def grid(self, key, axis):
+        return np.convolve(self.gridnode(key, axis), [0.5, 0.5], mode='valid')
+
 # --- Level 2 methods ---
 
     # --- General Information ---
@@ -295,7 +303,7 @@ class Dumpreader_ifc(with_metaclass(abc.ABCMeta, FieldAnalyzer)):
             return self.dumpidentifier == other.dumpidentifier
 
 
-class Simulationreader_ifc(collections.Sequence):
+class Simulationreader_ifc(Sequence):
     '''
     Interface for reading the data of a full Simulation.
 
@@ -304,7 +312,7 @@ class Simulationreader_ifc(collections.Sequence):
     the data of multiple dumps. In the easiest case this can be the .visit
     file.
 
-    The Simulationreader_ifc is subclass of collections.Sequence and will
+    The Simulationreader_ifc is subclass of Sequence and will
     thus behave as a Sequence. The Objects in the Sequence are supposed to be
     subclassed from Dumpreader_ifc.
 
diff --git a/postpic/datareader/epochsdf.py b/postpic/datareader/epochsdf.py
index f6d7d518c22afed8ac95c73cf9e210a51cbbe844..6d492fc38233c2fa1a30712b504421551b7cc944 100644
--- a/postpic/datareader/epochsdf.py
+++ b/postpic/datareader/epochsdf.py
@@ -167,7 +167,10 @@ class Sdfreader(Dumpreader_ifc):
         m = self['Grid/Grid']
         extents = m.extents
         dims = len(m.dims)
-        axid = helper.axesidentify[axis]
+        axid = helper.axesidentify.get(axis)
+        if axid is None or axid + dims >= len(extents):
+            s = 'Axis "{}" is not an axis of the simulation box. Extents are: "{}".'
+            raise KeyError(s.format(axis, extents))
         return np.array([extents[axid], extents[axid + dims]])
 
     def simgridpoints(self, axis):
diff --git a/postpic/datareader/openPMDh5.py b/postpic/datareader/openPMDh5.py
index 48d4958deec01218fd936ec7a27d3bad29fd34b3..22449337091899a8d86cfe75c88d6c690116beaa 100644
--- a/postpic/datareader/openPMDh5.py
+++ b/postpic/datareader/openPMDh5.py
@@ -14,7 +14,7 @@
 # You should have received a copy of the GNU General Public License
 # along with postpic. If not, see <http://www.gnu.org/licenses/>.
 #
-# Copyright Stephan Kuschel 2016
+# Copyright Stephan Kuschel, 2018-2019
 '''
 .. _openPMD: https://github.com/openPMD/openPMD-standard
 
@@ -33,7 +33,8 @@ import numpy as np
 import re
 from .. import helper
 
-__all__ = ['OpenPMDreader', 'FileSeries']
+__all__ = ['OpenPMDreader', 'FileSeries',
+           'FbpicReader', 'FbpicFileSeries']
 
 
 class OpenPMDreader(Dumpreader_ifc):
@@ -47,7 +48,7 @@ class OpenPMDreader(Dumpreader_ifc):
     '''
 
     def __init__(self, h5file, **kwargs):
-        super(self.__class__, self).__init__(h5file, **kwargs)
+        super(OpenPMDreader, self).__init__(h5file, **kwargs)
         import os.path
         import h5py
         if not os.path.isfile(h5file):
@@ -83,7 +84,7 @@ class OpenPMDreader(Dumpreader_ifc):
             ret = np.float64(record.attrs['value']) * record.attrs['unitSI']
         else:
             # array data
-            ret = np.float64(record.value) * record.attrs['unitSI']
+            ret = np.float64(record[()]) * record.attrs['unitSI']
         return ret
 
     def gridoffset(self, key, axis):
@@ -127,12 +128,12 @@ class OpenPMDreader(Dumpreader_ifc):
         raise KeyError('number of simdimensions could not be retrieved for {}'.format(self))
 
     def _keyE(self, component, **kwargs):
-        axsuffix = {0: 'x', 1: 'y', 2: 'z'}[helper.axesidentify[component]]
-        return 'fields/E/' + axsuffix
+        axsuffix = {0: 'x', 1: 'y', 2: 'z', 90: 'r', 91: 't'}[helper.axesidentify[component]]
+        return 'fields/E/{}'.format(axsuffix)
 
     def _keyB(self, component, **kwargs):
-        axsuffix = {0: 'x', 1: 'y', 2: 'z'}[helper.axesidentify[component]]
-        return 'fields/B/' + axsuffix
+        axsuffix = {0: 'x', 1: 'y', 2: 'z', 90: 'r', 91: 't'}[helper.axesidentify[component]]
+        return 'fields/B/{}'.format(axsuffix)
 
     def _simgridkeys(self):
         return ['fields/E/x', 'fields/E/y', 'fields/E/z',
@@ -148,21 +149,26 @@ class OpenPMDreader(Dumpreader_ifc):
         this particle species.
         """
         attribid = helper.attribidentify[attrib]
-        options = {9: lambda s: self.data('particles/' + s + '/weighting'),
-                   0: lambda s: self.data('particles/' + s + '/position/x') +
-                   self.data('particles/' + s + '/positionOffset/x'),
-                   1: lambda s: self.data('particles/' + s + '/position/y') +
-                   self.data('particles/' + s + '/positionOffset/y'),
-                   2: lambda s: self.data('particles/' + s + '/position/z') +
-                   self.data('particles/' + s + '/positionOffset/z'),
-                   3: lambda s: self.data('particles/' + s + '/momentum/x'),
-                   4: lambda s: self.data('particles/' + s + '/momentum/y'),
-                   5: lambda s: self.data('particles/' + s + '/momentum/z'),
-                   10: lambda s: self.data('particles/' + s + '/id'),
-                   11: lambda s: self.data('particles/' + s + '/mass'),
-                   12: lambda s: self.data('particles/' + s + '/charge')}
+        options = {9: 'particles/{}/weighting',
+                   0: 'particles/{}/position/x',
+                   1: 'particles/{}/position/y',
+                   2: 'particles/{}/position/z',
+                   3: 'particles/{}/momentum/x',
+                   4: 'particles/{}/momentum/y',
+                   5: 'particles/{}/momentum/z',
+                   10: 'particles/{}/id',
+                   11: 'particles/{}/mass',
+                   12: 'particles/{}/charge'}
+        optionsoffset = {0: 'particles/{}/positionOffset/x',
+                         1: 'particles/{}/positionOffset/y',
+                         2: 'particles/{}/positionOffset/z'}
+        key = options[attribid]
+        offsetkey = optionsoffset.get(attribid)
         try:
-            ret = np.float64(options[attribid](species))
+            data = self.data(key.format(species))
+            if offsetkey is not None:
+                data += self.data(offsetkey.format(species))
+            ret = np.asarray(data, dtype=np.float64)
         except(IndexError):
             raise KeyError
         return ret
@@ -173,7 +179,7 @@ class OpenPMDreader(Dumpreader_ifc):
         '''
         ret = []
         self['fields'].visit(ret.append)
-        ret = ['fields/' + r for r in ret if not (r.startswith('E') or r.startswith('B'))]
+        ret = ['fields/{}'.format(r) for r in ret if not (r.startswith('E') or r.startswith('B'))]
         ret = [r for r in ret if hasattr(self[r], 'value')]
         ret.sort()
         return ret
@@ -182,6 +188,116 @@ class OpenPMDreader(Dumpreader_ifc):
         return '<OpenPMDh5reader at "' + str(self.dumpidentifier) + '">'
 
 
+class FbpicReader(OpenPMDreader):
+    '''
+    Special OpenPMDreader for FBpic, which is using an expansion into radial modes.
+
+    This is subclass of the OpenPMDreader which is converting the modes to
+    a radial representation.
+    '''
+    def __init__(self, simidentifier, **kwargs):
+        super(FbpicReader, self).__init__(simidentifier, **kwargs)
+
+    @staticmethod
+    def modeexpansion(rawdata, theta=0):
+        '''
+        The mode representation will be expanded for a given theta.
+        rawdata has to have the shape (Nm, Nr, Nz).
+        the returned array will be of shape (Nr, Nz).
+        '''
+        rawdata = np.float64(rawdata)
+        (Nm, Nr, Nz) = rawdata.shape
+        mult_above_axis = [1]
+        for mode in range(1, int(Nm / 2) + 1):
+            cos = np.cos(mode * theta)
+            sin = np.sin(mode * theta)
+            mult_above_axis += [cos, sin]
+        mult_above_axis = np.float64(mult_above_axis)
+        F_total = np.tensordot(mult_above_axis,
+                               rawdata, axes=(0, 0))
+        assert F_total.shape == (Nr, Nz), \
+            '''
+            Assertion error. Please open a new issue on github to report this.
+            shape={}, Nr={}, Nz={}
+            '''.format(F_total.shape, Nr, Nz)
+        return F_total
+
+    @staticmethod
+    def _radialdata(rawdata, theta=0):
+        '''
+        converts to radial data using `modeexpansion`, possibly for multiple
+        theta at once.
+        '''
+        if np.asarray(theta).shape == ():
+            # single theta
+            theta = [theta]
+        # multiple theta
+        data = np.asarray([FbpicReader.modeexpansion(rawdata, theta=t) for t in theta])
+        # switch from (theta, r, z) to (r, theta, z)
+        data = data.swapaxes(0, 1)
+        return data
+
+    # override inherited method to count points after mode expansion
+    def gridoffset(self, key, axis):
+        axid = helper.axesidentify[axis]
+        if axid == 91:  # theta
+            return 0
+        else:
+            # r, theta, z
+            axidremap = {90: 0, 2: 1}[axid]
+            return super(FbpicReader, self).gridoffset(key, axidremap)
+
+    # override inherited method to count points after mode expansion
+    def gridspacing(self, key, axis):
+        axid = helper.axesidentify[axis]
+        if axid == 91:  # theta
+            return 2 * np.pi / self.gridpoints(key, axis)
+        else:
+            # r, theta, z
+            axidremap = {90: 0, 2: 1}[axid]
+            return super(FbpicReader, self).gridspacing(key, axidremap)
+
+    # override inherited method to count points after mode expansion
+    def gridpoints(self, key, axis):
+        axid = helper.axesidentify[axis]
+        axid = axid % 90  # for r and theta
+        (Nm, Nr, Nz) = self[key].shape
+        # Ntheta does technically not exists because of the mode
+        # representation. To do a proper conversion from the modes to
+        # the grid, choose Ntheta based on the number of modes.
+        # Nyquist should be `Ntheta = Nm`, but be generous and
+        # make sure that theta and -theta are
+        # always included: 2, 4, 8, 16, 32, 64,...
+        Ntheta = int(2 * 2**np.ceil(np.log2(Nm)))
+        return (Nr, Ntheta, Nz)[axid]
+
+    # override
+    def _defaultaxisorder(self, gridkey):
+        return ('r', 'theta', 'z')
+
+    # override from OpenPMDreader
+    def data(self, key, theta=None):
+        raw = super(FbpicReader, self).data(key)  # SI conversion
+        if key.startswith('particles'):
+            return raw
+        # for fields expand the modes into a spatial grid first:
+        if theta is None:
+            # guess a proper grid. Number of points defined in `self.gridpoints`
+            theta = self.grid(key, 'theta')
+        data = self._radialdata(raw, theta=theta)  # modeexpansion
+        return data
+
+    def dataE(self, component, theta=None, **kwargs):
+        return self.data(self._keyE(component, **kwargs), theta=theta)
+
+    def dataB(self, component, theta=None, **kwargs):
+        return self.data(self._keyB(component, **kwargs), theta=theta)
+
+    # override
+    def __str__(self):
+        return '<FbpicReader at "' + str(self.dumpidentifier) + '">'
+
+
 class FileSeries(Simulationreader_ifc):
     '''
     Reads a time series of dumps from a given directory.
@@ -190,7 +306,7 @@ class FileSeries(Simulationreader_ifc):
     '''
 
     def __init__(self, simidentifier, dumpreadercls=OpenPMDreader, **kwargs):
-        super(self.__class__, self).__init__(simidentifier, **kwargs)
+        super(FileSeries, self).__init__(simidentifier, **kwargs)
         self.dumpreadercls = dumpreadercls
         import glob
         self._dumpfiles = glob.glob(simidentifier)
@@ -208,3 +324,10 @@ class FileSeries(Simulationreader_ifc):
 
     def __str__(self):
         return '<FileSeries at "' + self.simidentifier + '">'
+
+
+class FbpicFileSeries(FileSeries):
+
+    def __init__(self, *args, **kwargs):
+        super(FbpicFileSeries, self).__init__(*args, **kwargs)
+        self.dumpreadercls = FbpicReader
diff --git a/postpic/helper.py b/postpic/helper.py
index d98fc2d6f4af5df765a07f43938985323142f930..4271a38e1492ffe71d5c7bbaf2d4a667fcb74e8a 100644
--- a/postpic/helper.py
+++ b/postpic/helper.py
@@ -78,6 +78,8 @@ _filterwarnings()
 axesidentify = {'X': 0, 'x': 0, 0: 0,
                 'Y': 1, 'y': 1, 1: 1,
                 'Z': 2, 'z': 2, 2: 2,
+                'R': 90, 'r': 90, 90: 90,
+                'theta': 91, 't': 91, 91: 91,
                 None: slice(None)}
 attribidentify = axesidentify.copy()
 attribidentify.update({'PX': 3, 'Px': 3, 'px': 3, 3: 3,
@@ -326,9 +328,15 @@ def jac_det(jacobian_func):
 
     det_fun = jac_det(polar2linear_jac)
     def = det_fun(theta, r)
+
+    In the case of a scalar function, this function expects jacobian_func to return
+    the derivative in a 1-element iterable. This behaviour is compatible to the output of
+    `np.gradient` and `approx_jacobian`.
     '''
     def fun(*coords):
         jac = jacobian_func(*coords)
+        if len(jac) == 1:
+            return abs(jac[0])
         shape = np.broadcast(*coords).shape
         jacarray = np.asarray([[broadcast_to(a, shape) for a in row] for row in jac])
         jacarray = moveaxis(jacarray, [0, 1], [-2, -1])
@@ -380,7 +388,11 @@ def approx_jacobian(transform):
                                      'non-equidistant grid is not implemented for numpy < 1.13.')
             ravel_coords = [(r[-1]-r[0])/(len(r)-1) for r in ravel_coords]
 
-        shape = np.broadcast(*coords).shape
+        if len(coords) == 1:
+            # this specific case breaks np.broadcast in numpy 1.8.1 and older
+            shape = coords[0].shape
+        else:
+            shape = np.broadcast(*coords).shape
         mapped_coords = transform(*coords)
         mapped_coords = [broadcast_to(c, shape) for c in mapped_coords]
         jac = [np.gradient(c, *ravel_coords) for c in mapped_coords]
@@ -896,10 +908,9 @@ def kspace(component, fields, extent=None, interpolation=None, omega_func=omega_
 
             field = field.ensure_frequency_domain()
 
-            # fourier interpolation is done after the fft by applying a linear phase
+            # fourier interpolation is done implicitly by the fft code
             if interpolation == 'fourier':
-                # print('apply linear phase')
-                field = field._apply_linear_phase(dict(enumerate(grid_shift)))
+                field = field._shift_grid_by_fourier(dict(enumerate(grid_shift)), skip_fft=True)
 
             # add the field to the result with the appropriate prefactor
             # result.matrix += (-1)**(i-1) * prefactor * mesh[mesh_i] * field.matrix
@@ -1210,7 +1221,7 @@ def time_profile_at_plane(kspace_or_complex_field, axis='x', value=None, dir=1,
         slices[axis] = i
         km = kspace_prop.matrix
         # newmat[slices] = np.sum(kspace_prop.matrix, axis=axis)
-        newmat[slices] = ne.evaluate(expr)
+        newmat[tuple(slices)] = ne.evaluate(expr)
 
     k_transverse_tprofile = kspace.replace_data(newmat)
     t_axis = datahandling.Axis(name='t', unit='s',
diff --git a/postpic/helper_fft.py b/postpic/helper_fft.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc57a24bdcb8ac82cf4d0407762cc6b65b12dc45
--- /dev/null
+++ b/postpic/helper_fft.py
@@ -0,0 +1,91 @@
+#
+# This file is part of postpic.
+#
+# postpic is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# postpic is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with postpic. If not, see <http://www.gnu.org/licenses/>.
+#
+# Stephan Kuschel 2019
+# Alexander Blinne, 2017-2019
+"""
+Helper for fft functions.
+
+If available, pyfftw will be used to speed up calculations efficiently.
+"""
+import sys
+
+if sys.version[0] == '2':
+    import functools32 as functools
+else:
+    import functools
+
+
+__all__ = ['fft']
+
+
+class _fft:
+
+    try:
+        import psutil
+        nproc = psutil.cpu_count(logical=False)
+    except ImportError:
+        try:
+            import os
+            nproc = os.cpu_count()
+        except AttributeError:
+            import multiprocessing
+            nproc = multiprocessing.cpu_count()
+
+    try:
+        # pyfftw is, in most situations, faster than numpy's fft,
+        # although pyfftw will benefit from multithreading only on very large arrays
+        # on a 720x240x240 3D transform multithreading still doesn't give a large benefit
+        # benchmarks of a 720x240x240 transform of real data on a Intel(R) Xeon(R) CPU
+        # E5-1620 v4 @ 3.50GHz:
+        # numpy.fft: 3.6 seconds
+        # pyfftw, nproc=4: first transform 2.2s, further transforms 1.8s
+        # pyfftw, nproc=1: first transform 3.4s, further transforms 2.8s
+        # Try to import pyFFTW's numpy_fft interface
+        import pyfftw.interfaces.cache as fftw_cache
+        import pyfftw.interfaces.numpy_fft as fftw
+        fftw_cache.enable()
+        fft_module = fftw
+        fft_kwargs = dict(planner_effort='FFTW_ESTIMATE', threads=nproc)
+    except ImportError:
+        # pyFFTW is not available, just import numpy's fft
+        import numpy.fft as fft_module
+        fft_kwargs = dict()
+
+    _fft_functions = ['fft', 'ifft', 'fft2', 'ifft2', 'fftn',
+                      'ifftn', 'rfft', 'irfft', 'rfft2', 'irfft2', 'irfftn',
+                      'hfft', 'ihfft']
+
+    @classmethod
+    def _get_defaultkwargf(cls, name):
+        wrapped = getattr(cls.fft_module, name)
+
+        @functools.wraps(wrapped)
+        def ret(*args, **kwargs):
+            kws = cls.fft_kwargs.copy()
+            kws.update(kwargs)
+            return wrapped(*args, **kws)
+        return ret
+
+    def __getattr__(self, attr):
+        return getattr(self.fft_module, attr)
+
+    def __init__(self):
+        for fftf in self._fft_functions:
+            setattr(self, fftf, self._get_defaultkwargf(fftf))
+
+
+fft = _fft()
diff --git a/postpic/io/image.py b/postpic/io/image.py
index 4323a5f26f83cc62035c17386e6312479a6ba8dc..7e7fa2f05bad975c8871479ce5798e5193468e15 100644
--- a/postpic/io/image.py
+++ b/postpic/io/image.py
@@ -20,6 +20,8 @@
 The postpic.io subpackage provides free functions for importing and exporting data.
 '''
 
+import warnings
+
 import os.path as osp
 
 import numpy as np
@@ -33,7 +35,7 @@ def _import_field_image(filename, hotpixelremove=False):
     if filename.lower().endswith('png'):
         data = _readpng(filename)
     else:
-        data = _read_image_pil(filename)
+        data = _read_image(filename).astype(np.float64)
 
     if hotpixelremove:
         import scipy.ndimage
@@ -70,40 +72,57 @@ def _readpng(filename):
     Args:
         filename (str): Path and filename to the file to open
 
-    kwargs:
-        hotpixelremove (bool): Removes hotpixels if True. (Default: True)
-
     Returns:
         numpy.array: the png data as numpy array converted to float64
 
-    Author: Stephan Kuschel, 2015-2016
+    Author: Stephan Kuschel, 2015-2016, Alexander Blinne 2019
     '''
-    import png  # pypng
-    import scipy.misc as sm
-    import numpy as np
+    have_pypng = False
+    data = _read_image(filename)
+    try:
+        import png  # pypng
+        have_pypng = True
+    except ImportError:
+        pass
+
+    if not have_pypng:
+        warnings.warn('Can not import `PyPNG`. Png file can be imported, however it is not '
+                      'guaranteed, that the count values will be correct for non 8 or 16 bit '
+                      'images.')
+        return data.astype(np.float64)
+
     meta = png.Reader(filename)
     meta.preamble()
-    ret = sm.imread(filename)
     if meta.sbit is not None:
         significant_bits = ord(meta.sbit)
-        ret >>= 16 - significant_bits
+        data >>= 16 - significant_bits
     # else: 8 bit image, no need to modify data
-    return np.float64(ret)
+    return data.astype(np.float64)
 
 
-def _read_image_pil(filename):
+def _read_image(filename):
     '''
-    reads an image file using PIL.Image.open.
+    reads an image file using imageio.imread or PIL.Image.open.
 
     Args:
         filename (str): Path and filename to the file to open
 
     Returns:
-        numpy.array: the image data as numpy array converted to float64
+        numpy.array: the image data as numpy array
 
-    Author: Stephan Kuschel, 2015-2016
+    Author: Stephan Kuschel, 2015-2016, Alexander Blinne 2019
     '''
+    data = None
+    try:
+        import imageio
+        data = imageio.imread(filename)
+    except ImportError:
+        pass
+
+    if data is not None:
+        return data
+
     from PIL import Image
     im = Image.open(filename)
-    data = np.array(im, dtype=np.float64)
+    data = np.array(im)
     return data
diff --git a/postpic/io/vtk.py b/postpic/io/vtk.py
index 8fa2c0f4607786333a5db06fa5d8ae215fdc83f7..edc82db871b934eda333b2fd847b182e3e937918 100644
--- a/postpic/io/vtk.py
+++ b/postpic/io/vtk.py
@@ -224,7 +224,7 @@ class ArrayData(object):
         self.name = str(self.name).replace(' ', '_')
 
     def transform_data(self, dtype):
-        data = np.vstack((np.ravel(f, order='F') for f in self.fields))
+        data = np.vstack(tuple(np.ravel(f, order='F') for f in self.fields))
         data = data.ravel(order='F').astype(dtype)
         return data
 
diff --git a/postpic/particles/_routines.py b/postpic/particles/_routines.py
index ea52b76dd70fee166f16039c57291bd8218e5770..7ce8b5d3aad3e298d57948011dd5d8c3ad55eea7 100644
--- a/postpic/particles/_routines.py
+++ b/postpic/particles/_routines.py
@@ -21,7 +21,11 @@ Particle related functions.
 from __future__ import absolute_import, division, print_function, unicode_literals
 
 import numpy as np
-import collections
+
+try:
+    from collections.abc import Iterable
+except ImportError:
+    from collections import Iterable
 
 from . import _particlestogrid as ptg
 from ..helper import PhysicalConstants
@@ -92,7 +96,7 @@ def histogramdd(data, **kwargs):
         data = (data, )  # ([1,2,3],)
     if len(data) > 3:
         raise ValueError('Data with len {:} not supported. Maximum is 3D data.'.format(len(data)))
-    if isinstance(kwrange, collections.Iterable) and np.isscalar(kwrange[0]):
+    if isinstance(kwrange, Iterable) and np.isscalar(kwrange[0]):
         kwrange = (kwrange, )
     if np.isscalar(kwbins):
         kwbins = (kwbins, ) * len(data)
@@ -173,6 +177,9 @@ class SpeciesIdentifier(PhysicalConstants):
                  'Photon': _specdict(0, 0, False),
                  'Positron': _specdict(1, 1, False),
                  'positron': _specdict(1, 1, False),
+                 'bw_positron': _specdict(1, 1, False),
+                 'bw_electron': _specdict(1, -1, False),
+                 'photon': _specdict(0, 0, False),
                  'gold1': _specdict(1836.2 * 197, 1, True),
                  'gold3': _specdict(1836.2 * 197, 3, True),
                  'gold4': _specdict(1836.2 * 197, 4, True),
diff --git a/postpic/particles/particles.py b/postpic/particles/particles.py
index 23bd21ec7f71aa3353073b346729ab7163d5920a..f2565354813cbce879b8159e01ba757e8cc0a8b0 100644
--- a/postpic/particles/particles.py
+++ b/postpic/particles/particles.py
@@ -669,7 +669,7 @@ class MultiSpecies(object):
             # Happens, if only missing species were added with
             # ignore_missing_species = True.
             return np.array([])
-        data = (ssdata(ss) for ss in self._ssas)
+        data = tuple(ssdata(ss) for ss in self._ssas)
         return np.hstack(data)
 
     def __call_func(self, func):
diff --git a/postpic/particles/scalarproperties.py b/postpic/particles/scalarproperties.py
index 3b23b7a8bc97685b9bf369a1ab29b86efea59206..f94286a6b7921b77eb99bf34f93a3a8289db3b22 100644
--- a/postpic/particles/scalarproperties.py
+++ b/postpic/particles/scalarproperties.py
@@ -19,7 +19,12 @@
 from __future__ import absolute_import, division, print_function, unicode_literals
 
 import warnings
-import collections
+
+try:
+    from collections.abc import Mapping
+except ImportError:
+    from collections import Mapping
+
 import numexpr as ne
 
 __all__ = ['ScalarProperty']
@@ -96,7 +101,7 @@ class ScalarProperty(object):
         return formatstring.format(**dict(self))
 
 
-class ScalarPropertyContext(collections.Mapping):
+class ScalarPropertyContext(Mapping):
 
     def __init__(self):
         '''
@@ -176,14 +181,18 @@ _defaultscalars = [
     ScalarProperty('pz', 'pz', 'kg*m/s'),
     ScalarProperty('sqrt(px**2 + py**2 + pz**2)', 'p', 'kg*m/s'),
     ScalarProperty('(px**2 + py**2 + pz**2)/(mass * c)**2', '_np2', ''),
-    ScalarProperty('_np2 / (sqrt(1 + _np2) + 1)', 'gamma_m1', ''),
+    ScalarProperty('_np2 / (sqrt(1 + _np2) + 1)', 'gamma_m1', ''),  # gamma - 1
     ScalarProperty('_np2 / (sqrt(1 + _np2) + 1) + 1', 'gamma', ''),
-    ScalarProperty('gamma * mass', 'gamma_m', 'kg'),
+    ScalarProperty('gamma * mass', 'gamma_m', 'kg'),  # gamma * mass
     ScalarProperty('beta * c', 'v', 'm/s'),
     ScalarProperty('px / (gamma * mass)', 'vx', 'm/s'),
     ScalarProperty('py / (gamma * mass)', 'vy', 'm/s'),
     ScalarProperty('pz / (gamma * mass)', 'vz', 'm/s'),
-    ScalarProperty('sqrt(gamma**2 - 1) / gamma', 'beta', ''),
+    # beta = sqrt(gamma**2 - 1) / gamma
+    #      = sqrt(gamma_m1**2 + 2 * gamma_m1) / gamma
+    #      = sqrt(gamma_m1**2 + 2 * gamma_m1) / (gamma_m1 + 1)
+    # numerically more stable and gamma_m1 has to be calculated just once.
+    ScalarProperty('sqrt(gamma_m1**2 + 2 * gamma_m1) / (gamma_m1 + 1)', 'beta', ''),
     ScalarProperty('vx / c', 'betax', ''),
     ScalarProperty('vy / c', 'betay', ''),
     ScalarProperty('vz / c', 'betaz', ''),
diff --git a/run-tests.py b/run-tests.py
index cf29ab5e7dd0a6b74982bcc1b6305aec3f194898..37343ecf42b6c1a3b95b071b802ed355bee1aa97 100755
--- a/run-tests.py
+++ b/run-tests.py
@@ -63,7 +63,8 @@ def run_alltests(python='python', fast=False, skip_setup=False):
     cmdrpl = dict(python=python)
     # make sure .pyx sources are up to date and compiled
     if not skip_setup:
-        runcmd('{python} setup.py develop --user'.format(**cmdrpl))
+        # should be the same as `./setup.py develop --user`
+        runcmd('{python} -m pip install --user -e .'.format(**cmdrpl))
 
     # find pep8 or pycodestyle (its successor)
     try:
@@ -87,7 +88,8 @@ def run_alltests(python='python', fast=False, skip_setup=False):
             '{python} ' + os.path.join('examples', 'simpleexample.py'),
             '{python} ' + os.path.join('examples', 'particleshapedemo.py'),
             '{python} ' + os.path.join('examples', 'time_cythonfunctions.py'),
-            '{python} ' + os.path.join('examples', 'openPMD.py')]
+            '{python} ' + os.path.join('examples', 'openPMD.py'),
+            '{python} ' + os.path.join('examples', 'kspace-test-2d.py')]
     if not fast:
         cmds += cmdo
     for cmd in cmds:
diff --git a/test/test_datahandling.py b/test/test_datahandling.py
index 61d0ae4c4cb67c742682f66471c86896dca0c3f4..ce38b42e1109a7e8fd0e73a387959b5a47398b31 100755
--- a/test/test_datahandling.py
+++ b/test/test_datahandling.py
@@ -102,6 +102,16 @@ class TestAxis(unittest.TestCase):
         with self.assertRaises(TypeError):
             dh.Axis(extent=(0,1), n=99, unknownarg=0)
 
+    def test_grid_spacing(self):
+        ax = dh.Axis(grid=[1])  # ok
+        with self.assertRaises(ValueError):
+            ax = dh.Axis(grid=[1,1])
+
+    def test_grid_node_spacing(self):
+        ax = dh.Axis(grid_node=[1])  # ok
+        with self.assertRaises(ValueError):
+            ax = dh.Axis(grid_node=[1,1])
+
     def test_reversed(self, n=100):
         ax = dh.Axis(extent=[-1,1], n=n)
         axri = dh.Axis(extent=[1,-1], n=n)
@@ -261,11 +271,14 @@ class TestField(unittest.TestCase):
         f1d_slice = self.f1d[0.15:0.75]
         self.assertTrue(np.all(f1d_slice.grid >= 0.15))
         self.assertTrue(np.all(f1d_slice.grid <= 0.75))
-        self.assertEqual(self.f1d[5].shape, (1,))
+        self.assertEqual(self.f1d[dh.KeepDim(5)].shape, (1,))
+        self.assertEqual(self.f1d[dh.KeepDim(slice(5,6))].shape, (1,))
+        self.assertEqual(self.f1d[5].shape, ())
 
         self.assertEqual(self.f2d[0.5:, :].shape, (2, 5))
 
-        self.assertEqual(self.f3d[0.5:, :, 0.5].shape, (2, 5, 1))
+        self.assertEqual(self.f3d[0.5:, :, dh.KeepDim(0.5)].shape, (2, 5, 1))
+        self.assertEqual(self.f3d[0.5:, :, 0.5].shape, (2, 5))
 
     def test_cutout(self):
         f1d_cutout = self.f1d.cutout((0.15, 0.75))
@@ -670,7 +683,7 @@ class TestField(unittest.TestCase):
 
     def test_operators_broadcasting(self):
         a = self.f2d
-        b = self.f2d[0.5, :]
+        b = self.f2d[dh.KeepDim(0.5), :]
 
         self.assertEqual(a.shape, (4,5))
         self.assertEqual(b.shape, (1,5))
diff --git a/test/test_helper.py b/test/test_helper.py
index 33c33373ad6bab5357a928a3198069cf2bec5a4c..53e93581ee2421e63ebd4a24f83cb5046403ce8e 100755
--- a/test/test_helper.py
+++ b/test/test_helper.py
@@ -172,6 +172,71 @@ class TestHelper(unittest.TestCase):
         self.assertAllClose(f2, f3, atol=0.003)
         self.assertAllClose(f3, f4)
 
+    def test_approx_jacobian_1d(self):
+        def fun(x):
+            return [x**2]
+
+        def fun_jac(x):
+            return [2*x]
+
+        def fun_jd(x):
+            [da_dx] = fun_jac(x)
+            return abs(da_dx)
+
+        x = np.linspace(0.5, 2, 128)
+
+        fd = fun_jac(x)
+        fun_jac_approx = pp.helper.approx_jacobian(fun)
+        fda = fun_jac_approx(x)
+
+        self.assertAllClose(fd, fda,
+                            atol=0.02)
+
+        jd = fun_jd(x)
+        jd_approx = pp.helper.jac_det(fun_jac)(x)
+        jd_approx2 = pp.helper.jac_det(fun_jac_approx)(x)
+
+        self.assertAllClose(jd, jd_approx)
+        self.assertAllClose(jd, jd_approx2, rtol=0.02)
+
+
+    def test_approx_jacobian_2d(self):
+        def fun(x, y):
+            a = x**2
+            b = y**2 + x*y
+            return a, b
+
+        def fun_jac(x, y):
+            da_dx = 2*x
+            da_dy = 0
+            db_dx = y
+            db_dy = 2*y + x
+            return [[da_dx, da_dy], [db_dx, db_dy]]
+
+        def fun_jd(x, y):
+            [[da_dx, da_dy], [db_dx, db_dy]] = fun_jac(x, y)
+            return abs(da_dx * db_dy - da_dy * db_dx)
+
+        x = np.linspace(0.5, 2, 128)[:, np.newaxis]
+        y = np.linspace(2, 4, 128)[np.newaxis, :]
+
+        jac = fun_jac(x, y)
+        fun_jac_approx = pp.helper.approx_jacobian(fun)
+        jac_approx = fun_jac_approx(x, y)
+
+        for i in (0,1):
+            for j in (0,1):
+                self.assertAllClose(*np.broadcast_arrays(jac[i][j],
+                                                         jac_approx[i][j]),
+                                    atol=0.02)
+
+        jd = fun_jd(x, y)
+        jd_approx = pp.helper.jac_det(fun_jac)(x, y)
+        jd_approx2 = pp.helper.jac_det(fun_jac_approx)(x, y)
+
+        self.assertAllClose(jd, jd_approx)
+        self.assertAllClose(jd, jd_approx2, rtol=0.03)
+
 
 if __name__ == '__main__':
     unittest.main()