"""
================================================================================
This file is part of the AMUN source code, a program to perform
Newtonian or relativistic magnetohydrodynamical simulations on a uniform or
adaptive mesh.
Copyright (C) 2018-2020 Grzegorz Kowal <grzegorz@amuncode.org>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
================================================================================
module: AMUN
Python module with subroutines to read AMUN code HDF5 and XML snapshot files.
The requirements for this package are:
- h5py
- numpy
- scipy (optional, for data interpolation)
- xxhash (optional, for data consistency verification)
- zstd (optional, for data compression)
--------------------------------------------------------------------------------
"""
import h5py as h5
import numpy as np
import os.path as op
import sys
try:
from scipy.interpolate import splrep, splev, interp1d, pchip_interpolate
scipy_available = True
except ImportError:
scipy_available = False
try:
from xxhash import *
xxhash_available = True
except ImportError:
xxhash_available = False
try:
import zstandard as zstd
zstd_available = True
except ImportError:
zstd_available = False
try:
import lz4.frame as lz4
lz4_available = True
except ImportError:
lz4_available = False
try:
import lzma
lzma_available = True
except ImportError:
lzma_available = False
class AmunXML:
"""AMUN XML snapshot class"""
def __init__(self, path):
import xml.etree.ElementTree as ET
if op.isdir(path):
mfile = op.join(path, 'metadata.xml')
if op.exists(mfile):
self.path = path
self.metafile = mfile
self.format = "xml"
else:
print("{} does not contain 'metadata.xml'!".format(path))
return None
else:
mfile = op.basename(path)
if mfile == 'metadata.xml':
self.path = op.dirname(path)
self.metafile = path
self.format = "xml"
else:
print("{} is not 'metadata.xml'!".format(path))
return None
tree = ET.parse(self.metafile)
root = tree.getroot()
if root.tag == 'AMUNFile':
self.attributes = dict()
self.binaries = dict()
for child in root:
if child.tag == 'BinaryFiles':
for item in child:
self.binaries[item.attrib['name']] = item.attrib
self.binaries[item.attrib['name']]['file'] = item.text
else:
for item in child:
if item.tag == 'Attribute':
if item.attrib['type'] == 'double':
self.attributes[item.attrib['name']] = float(item.text)
elif item.attrib['type'] == 'integer':
self.attributes[item.attrib['name']] = int(item.text)
else:
self.attributes[item.attrib['name']] = item.text
else:
print("{} is not an AMUN snapshot!".format(self.metafile))
return None
self.chunks = dict()
for n in range(self.attributes['nprocs']):
mfile = op.join(self.path, 'datablocks_{:06d}.xml'.format(n))
tree = ET.parse(mfile)
root = tree.getroot()
self.chunks[n] = dict()
for item in root.findall('./BinaryFiles/Attribute'):
self.chunks[n][item.attrib['name']] = item.attrib
self.chunks[n][item.attrib['name']]['file'] = item.text
for item in root.findall('./DataBlocks/Attribute'):
if item.attrib['name'] == 'dblocks':
self.chunks[n]['dblocks'] = int(item.text)
        if 'variables' in self.attributes:
            variables = self.attributes['variables'].split()
variables.append('level')
if 'dens' in variables:
variables.append('logd')
if 'pres' in variables:
variables.append('logp')
if all(v in variables for v in ['velx','vely','velz']):
variables.append('velo')
variables.append('divv')
variables.append('vorx')
variables.append('vory')
variables.append('vorz')
variables.append('vort')
if all(v in variables for v in ['magx','magy','magz']):
variables.append('magn')
variables.append('divb')
variables.append('curx')
variables.append('cury')
variables.append('curz')
variables.append('curr')
if 'pres' in variables and 'adiabatic_index' in self.attributes:
variables.append('eint')
if all(v in variables for v in ['dens','pres']):
variables.append('temp')
if self.attributes['eqsys'] in [ 'hd', 'mhd' ] \
and all(v in variables for v in ['dens','velo']):
variables.append('ekin')
if self.attributes['eqsys'] in [ 'mhd', 'srmhd' ] \
and 'magn' in variables:
variables.append('emag')
if 'velo' in variables and 'magn' in variables:
variables.append('elex')
variables.append('eley')
variables.append('elez')
variables.append('eele')
if all(v in variables for v in ['ekin','eint']):
variables.append('etot')
if self.attributes['eqsys'] in [ 'srhd', 'srmhd' ] \
and 'velo' in variables:
variables.append('lore')
else:
print("Cannot determine available variables from {}!".format(self.metafile))
return None
self.attributes['xlen'] = self.attributes['xmax'] - self.attributes['xmin']
self.attributes['ylen'] = self.attributes['ymax'] - self.attributes['ymin']
if self.attributes['ndims'] == 3:
self.attributes['zlen'] = self.attributes['zmax'] - self.attributes['zmin']
self.cell_size = dict()
for l in range(self.attributes['maxlev']):
n = self.attributes['xblocks'] * self.attributes['ncells'] * 2**l
h = self.attributes['xlen'] / n
self.cell_size[l+1] = h
self.variables = variables
def check_digest(self, dfile, data, digest):
if xxhash_available:
if digest.lower() != xxh64(data).hexdigest():
print("File '{}' seems to be corrupted!".format(dfile))
def read_binary_meta(self, aname, dtype='int32'):
import numpy as np
dfile = op.join(self.path, self.binaries[aname]['file'])
if op.exists(dfile):
if 'compression_format' in self.binaries[aname]:
cformat = self.binaries[aname]['compression_format']
if cformat == 'zstd':
if zstd_available:
dctx = zstd.ZstdDecompressor()
with open(dfile, mode ='rb') as f:
compressed = f.read()
if 'compressed_digest' in self.binaries[aname]:
self.check_digest(dfile, compressed, self.binaries[aname]['compressed_digest'])
data = dctx.decompress(compressed)
if 'digest' in self.binaries[aname]:
self.check_digest(dfile, data, self.binaries[aname]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
                        print('ZSTD module unavailable. Please install Zstandard for Python.')
return None
elif cformat == 'lz4':
if lz4_available:
with open(dfile, mode ='rb') as f:
compressed = f.read()
if 'compressed_digest' in self.binaries[aname]:
self.check_digest(dfile, compressed, self.binaries[aname]['compressed_digest'])
data = lz4.decompress(compressed)
if 'digest' in self.binaries[aname]:
self.check_digest(dfile, data, self.binaries[aname]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
                        print('LZ4 module unavailable. Please install LZ4 for Python.')
return None
elif cformat == 'lzma':
if lzma_available:
with open(dfile, mode ='rb') as f:
compressed = f.read()
if 'compressed_digest' in self.binaries[aname]:
self.check_digest(dfile, compressed, self.binaries[aname]['compressed_digest'])
data = lzma.decompress(compressed)
if 'digest' in self.binaries[aname]:
self.check_digest(dfile, data, self.binaries[aname]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
                        print('LZMA module unavailable. Please install LZMA for Python.')
return None
else:
                    print("Compression format '{}' is not supported. Please install the corresponding Python module.".format(cformat))
return None
else:
with open(dfile, mode ='rb') as f:
data = f.read()
if 'digest' in self.binaries[aname]:
self.check_digest(dfile, data, self.binaries[aname]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
return None
def read_binary_data(self, n, var, dtype='float64'):
import numpy as np
dfile = op.join(self.path, self.chunks[n][var]['file'])
if op.exists(dfile):
if 'compression_format' in self.chunks[n][var]:
cformat = self.chunks[n][var]['compression_format']
if cformat == 'zstd':
if zstd_available:
dctx = zstd.ZstdDecompressor()
with open(dfile, mode ='rb') as f:
compressed = f.read()
if 'compressed_digest' in self.chunks[n][var]:
self.check_digest(dfile, compressed, self.chunks[n][var]['compressed_digest'])
data = dctx.decompress(compressed)
if 'digest' in self.chunks[n][var]:
self.check_digest(dfile, data, self.chunks[n][var]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
                        print('ZSTD module unavailable. Please install Zstandard for Python.')
return None
elif cformat == 'lz4':
if lz4_available:
with open(dfile, mode ='rb') as f:
compressed = f.read()
if 'compressed_digest' in self.chunks[n][var]:
self.check_digest(dfile, compressed, self.chunks[n][var]['compressed_digest'])
data = lz4.decompress(compressed)
if 'digest' in self.chunks[n][var]:
self.check_digest(dfile, data, self.chunks[n][var]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
                        print('LZ4 module unavailable. Please install LZ4 for Python.')
return None
elif cformat == 'lzma':
if lzma_available:
with open(dfile, mode ='rb') as f:
compressed = f.read()
if 'compressed_digest' in self.chunks[n][var]:
self.check_digest(dfile, compressed, self.chunks[n][var]['compressed_digest'])
data = lzma.decompress(compressed)
if 'digest' in self.chunks[n][var]:
self.check_digest(dfile, data, self.chunks[n][var]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
                        print('LZMA module unavailable. Please install LZMA for Python.')
return None
else:
                    print("Compression format '{}' is not supported. Please install the corresponding Python module.".format(cformat))
return None
else:
with open(dfile, mode ='rb') as f:
data = f.read()
if 'digest' in self.chunks[n][var]:
self.check_digest(dfile, data, self.chunks[n][var]['digest'])
return np.frombuffer(data, dtype=dtype)
else:
return None
def dataset(self, var, extent=None, shrink=1, maxlev=-1, interpolation='rebin', progress=False):
import numpy as np
import xml.etree.ElementTree as ET
        if var not in self.variables:
print("Dataset '{}' not available in the snapshot!".format(var))
print("Available variables are: ", self.variables)
return None
if progress:
sys.stdout.write("Snapshot's path:\n '{}'\n".format(self.path))
m = 0
nd = self.attributes['ndims']
nn = self.attributes['bcells']
nc = self.attributes['ncells']
ng = self.attributes['nghosts']
ml = self.attributes['maxlev']
nx = self.attributes['xblocks']
ny = self.attributes['yblocks']
nm = self.attributes['nleafs']
if isinstance(maxlev,int):
if 1 <= maxlev <= ml:
shrink = 2**(ml-maxlev)
elif maxlev > ml:
sys.stdout.write("Parameter 'maxlev' should be between 1 and {}.\n".format(ml))
return None
else:
sys.stdout.write("Parameter 'maxlev' must be an integer between 1 and {}.\n".format(ml))
return None
dlo = np.array([self.attributes['xmin'], self.attributes['ymin']])
dup = np.array([self.attributes['xmax'], self.attributes['ymax']])
dln = np.array([self.attributes['xlen'], self.attributes['ylen']])
if nd == 3:
dlo = np.append(dlo, self.attributes['zmin'])
dup = np.append(dup, self.attributes['zmax'])
dln = np.append(dln, self.attributes['zlen'])
slo = np.array(dlo)
sup = np.array(dup)
        if extent is not None:
            if len(extent) != 2 * nd:
                print("Wrong dimension of the parameter 'extent' of the function dataset()!")
                return None
            slo = np.array(extent)[0::2]
            sup = np.array(extent)[1::2]
            if any(slo > dup) or any(sup < dlo) or any(slo >= sup):
                print("Wrong values in the parameter 'extent' of the function dataset()!")
                return None
if nd == 3:
nz = self.attributes['zblocks']
rm = np.array([nz, ny, nx])
bm = np.array([nc, nc, nc])
else:
rm = np.array([ny, nx])
bm = np.array([nc, nc])
mset = self.read_binary_meta('fields')
nf = int(mset.size / nm)
mset = np.reshape(mset, [nf,nm])
level = dict()
coord = dict()
for n in range(nm):
level[mset[0,n]] = mset[ 1,n]
coord[mset[0,n]] = mset[2:5,n]
ml = mset[1,:].max()
dm = rm * nc * 2**(ml - 1) // shrink
ll = np.array(np.floor(dm[::-1] * (slo - dlo) / dln), dtype='int')
uu = np.array( np.ceil(dm[::-1] * (sup - dlo) / dln), dtype='int')
dm = (uu - ll)[::-1]
arr = np.zeros(dm[:])
for n in range(self.attributes['nprocs']):
nb = self.chunks[n]['dblocks']
if nb > 0:
cm = [ nn ]*nd
cm.append( nb )
ids = self.read_binary_data(n, 'ids', dtype='int32')
if var == 'level':
dset = np.zeros(cm)
for p in range(nb):
dset[...,p] = level[ids[p]]
elif var == 'logd':
dset = np.log10(self.read_binary_data(n, 'dens'))
dset = np.reshape(dset, cm)
elif var == 'logp':
dset = np.log10(self.read_binary_data(n, 'pres'))
dset = np.reshape(dset, cm)
elif var == 'velo':
tmp = self.read_binary_data(n, 'velx')
dset = tmp**2
tmp = self.read_binary_data(n, 'vely')
dset += tmp**2
tmp = self.read_binary_data(n, 'velz')
dset += tmp**2
dset = np.reshape(np.sqrt(dset), cm)
elif var == 'magn':
tmp = self.read_binary_data(n, 'magx')
dset = tmp**2
tmp = self.read_binary_data(n, 'magy')
dset += tmp**2
tmp = self.read_binary_data(n, 'magz')
dset += tmp**2
dset = np.reshape(np.sqrt(dset), cm)
elif var == 'ekin':
tmp = self.read_binary_data(n, 'velx')
dset = tmp**2
tmp = self.read_binary_data(n, 'vely')
dset += tmp**2
tmp = self.read_binary_data(n, 'velz')
dset += tmp**2
tmp = self.read_binary_data(n, 'dens')
dset *= tmp
dset *= 0.5
dset = np.reshape(dset, cm)
elif var == 'emag':
tmp = self.read_binary_data(n, 'magx')
dset = tmp**2
tmp = self.read_binary_data(n, 'magy')
dset += tmp**2
tmp = self.read_binary_data(n, 'magz')
dset += tmp**2
dset *= 0.5
dset = np.reshape(dset, cm)
elif var == 'elex':
by = np.reshape(self.read_binary_data(n, 'magy'), cm)
bz = np.reshape(self.read_binary_data(n, 'magz'), cm)
vy = np.reshape(self.read_binary_data(n, 'vely'), cm)
vz = np.reshape(self.read_binary_data(n, 'velz'), cm)
dset = vy * bz - vz * by
if self.attributes['resistivity'] > 0:
if nd == 3:
tmp = (np.roll(by, 1, axis=0) - np.roll(by, -1, axis=0))
tmp += (np.roll(bz, -1, axis=1) - np.roll(bz, 1, axis=1))
else:
tmp = (np.roll(bz, -1, axis=0) - np.roll(bz, 1, axis=0))
for p in range(nb):
tmp[...,p] /= self.cell_size[level[ids[p]]]
dset -= 0.5 * self.attributes['resistivity'] * tmp
elif var == 'eley':
bx = np.reshape(self.read_binary_data(n, 'magx'), cm)
bz = np.reshape(self.read_binary_data(n, 'magz'), cm)
vx = np.reshape(self.read_binary_data(n, 'velx'), cm)
vz = np.reshape(self.read_binary_data(n, 'velz'), cm)
dset = vz * bx - vx * bz
if self.attributes['resistivity'] > 0:
if nd == 3:
tmp = (np.roll(bx, -1, axis=0) - np.roll(bx, 1, axis=0))
tmp += (np.roll(bz, 1, axis=2) - np.roll(bz, -1, axis=2))
else:
tmp = (np.roll(bz, 1, axis=1) - np.roll(bz, -1, axis=1))
for p in range(nb):
tmp[...,p] /= self.cell_size[level[ids[p]]]
dset -= 0.5 * self.attributes['resistivity'] * tmp
elif var == 'elez':
bx = np.reshape(self.read_binary_data(n, 'magx'), cm)
by = np.reshape(self.read_binary_data(n, 'magy'), cm)
vx = np.reshape(self.read_binary_data(n, 'velx'), cm)
vy = np.reshape(self.read_binary_data(n, 'vely'), cm)
dset = vx * by - vy * bx
if self.attributes['resistivity'] > 0:
if nd == 3:
tmp = (np.roll(bx, 1, axis=1) - np.roll(bx, -1, axis=1))
tmp += (np.roll(by, -1, axis=2) - np.roll(by, 1, axis=2))
else:
tmp = (np.roll(bx, 1, axis=0) - np.roll(bx, -1, axis=0))
tmp += (np.roll(by, -1, axis=1) - np.roll(by, 1, axis=1))
for p in range(nb):
tmp[...,p] /= self.cell_size[level[ids[p]]]
dset -= 0.5 * self.attributes['resistivity'] * tmp
elif var == 'eele':
b1 = np.reshape(self.read_binary_data(n, 'magy'), cm)
b2 = np.reshape(self.read_binary_data(n, 'magz'), cm)
v1 = np.reshape(self.read_binary_data(n, 'vely'), cm)
v2 = np.reshape(self.read_binary_data(n, 'velz'), cm)
dtmp = v1 * b2 - v2 * b1
if self.attributes['resistivity'] > 0:
if nd == 3:
tmp = (np.roll(b1, 1, axis=0) - np.roll(b1, -1, axis=0))
tmp += (np.roll(b2, -1, axis=1) - np.roll(b2, 1, axis=1))
else:
tmp = (np.roll(b2, -1, axis=0) - np.roll(b2, 1, axis=0))
for p in range(nb):
tmp[...,p] /= self.cell_size[level[ids[p]]]
dtmp -= 0.5 * self.attributes['resistivity'] * tmp
dset = dtmp**2
b1 = np.reshape(self.read_binary_data(n, 'magx'), cm)
v1 = np.reshape(self.read_binary_data(n, 'velx'), cm)
dtmp = v2 * b1 - v1 * b2
if self.attributes['resistivity'] > 0:
if nd == 3:
tmp = (np.roll(b1, -1, axis=0) - np.roll(b1, 1, axis=0))
tmp += (np.roll(b2, 1, axis=2) - np.roll(b2, -1, axis=2))
else:
tmp = (np.roll(b2, 1, axis=1) - np.roll(b2, -1, axis=1))
for p in range(nb):
tmp[...,p] /= self.cell_size[level[ids[p]]]
dtmp -= 0.5 * self.attributes['resistivity'] * tmp
dset += dtmp**2
b2 = np.reshape(self.read_binary_data(n, 'magy'), cm)
v2 = np.reshape(self.read_binary_data(n, 'vely'), cm)
dtmp = v1 * b2 - v2 * b1
if self.attributes['resistivity'] > 0:
if nd == 3:
tmp = (np.roll(b1, 1, axis=1) - np.roll(b1, -1, axis=1))
tmp += (np.roll(b2, -1, axis=2) - np.roll(b2, 1, axis=2))
else:
tmp = (np.roll(b1, 1, axis=0) - np.roll(b1, -1, axis=0))
tmp += (np.roll(b2, -1, axis=1) - np.roll(b2, 1, axis=1))
for p in range(nb):
tmp[...,p] /= self.cell_size[level[ids[p]]]
dtmp -= 0.5 * self.attributes['resistivity'] * tmp
dset += dtmp**2
dset = np.sqrt(dset)
elif var == 'eint':
dset = self.read_binary_data(n, 'pres')
                    dset *= 1.0 / (self.attributes['adiabatic_index'] - 1)
dset = np.reshape(dset, cm)
elif var == 'temp':
dset = self.read_binary_data(n, 'pres') / self.read_binary_data(n, 'dens')
dset = np.reshape(dset, cm)
elif var == 'etot':
tmp = self.read_binary_data(n, 'velx')
dset = tmp**2
tmp = self.read_binary_data(n, 'vely')
dset += tmp**2
tmp = self.read_binary_data(n, 'velz')
dset += tmp**2
tmp = self.read_binary_data(n, 'dens')
dset *= tmp
tmp = self.read_binary_data(n, 'pres')
                    dset += 2.0 / (self.attributes['adiabatic_index'] - 1) * tmp
if 'magn' in self.variables:
tmp = self.read_binary_data(n, 'magx')
                        dset += tmp**2
tmp = self.read_binary_data(n, 'magy')
dset += tmp**2
tmp = self.read_binary_data(n, 'magz')
dset += tmp**2
dset *= 0.5
dset = np.reshape(dset, cm)
                elif var == 'lore':
tmp = self.read_binary_data(n, 'velx')
dset = tmp**2
tmp = self.read_binary_data(n, 'vely')
dset += tmp**2
tmp = self.read_binary_data(n, 'velz')
dset += tmp**2
dset = 1.0 / np.sqrt(1.0 - dset)
dset = np.reshape(dset, cm)
elif var == 'divv':
p = nd - 1
tmp = np.reshape(self.read_binary_data(n, 'velx'), cm)
dset = (np.roll(tmp, -1, axis=p) \
- np.roll(tmp, 1, axis=p))
p -= 1
tmp = np.reshape(self.read_binary_data(n, 'vely'), cm)
dset += (np.roll(tmp, -1, axis=p) \
- np.roll(tmp, 1, axis=p))
p -= 1
if p >= 0:
tmp = np.reshape(self.read_binary_data(n, 'velz'), cm)
                        dset += (np.roll(tmp, -1, axis=p) \
                                - np.roll(tmp, 1, axis=p))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'vorx':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'vely'), cm)
dset = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'velz'), cm)
dset += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
else:
tmp = np.reshape(self.read_binary_data(n, 'velz'), cm)
dset = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'vory':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'velx'), cm)
dset = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'velz'), cm)
dset += (np.roll(tmp, 1, axis=2) \
- np.roll(tmp, -1, axis=2))
else:
tmp = np.reshape(self.read_binary_data(n, 'velz'), cm)
dset = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'vorz':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'velx'), cm)
dset = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
tmp = np.reshape(self.read_binary_data(n, 'vely'), cm)
dset += (np.roll(tmp, -1, axis=2) \
- np.roll(tmp, 1, axis=2))
else:
tmp = np.reshape(self.read_binary_data(n, 'velx'), cm)
dset = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'vely'), cm)
dset += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'vort':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'velx'), cm)
wy = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
wz = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
tmp = np.reshape(self.read_binary_data(n, 'vely'), cm)
wz += (np.roll(tmp, -1, axis=2) \
- np.roll(tmp, 1, axis=2))
wx = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'velz'), cm)
wx += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
wy += (np.roll(tmp, 1, axis=2) \
- np.roll(tmp, -1, axis=2))
else:
tmp = np.reshape(self.read_binary_data(n, 'velx'), cm)
wz = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'vely'), cm)
wz += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
tmp = np.reshape(self.read_binary_data(n, 'velz'), cm)
wx = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
wy = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
dset = 0.5 * np.sqrt(wx**2 + wy**2 + wz**2)
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'divb':
p = nd - 1
tmp = np.reshape(self.read_binary_data(n, 'magx'), cm)
dset = (np.roll(tmp, -1, axis=p) \
- np.roll(tmp, 1, axis=p))
p -= 1
tmp = np.reshape(self.read_binary_data(n, 'magy'), cm)
dset += (np.roll(tmp, -1, axis=p) \
- np.roll(tmp, 1, axis=p))
p -= 1
if p >= 0:
tmp = np.reshape(self.read_binary_data(n, 'magz'), cm)
                        dset += (np.roll(tmp, -1, axis=p) \
                                - np.roll(tmp, 1, axis=p))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'curx':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'magy'), cm)
dset = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'magz'), cm)
dset += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
else:
tmp = np.reshape(self.read_binary_data(n, 'magz'), cm)
dset = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'cury':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'magx'), cm)
dset = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'magz'), cm)
dset += (np.roll(tmp, 1, axis=2) \
- np.roll(tmp, -1, axis=2))
else:
tmp = np.reshape(self.read_binary_data(n, 'magz'), cm)
dset = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'curz':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'magx'), cm)
dset = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
tmp = np.reshape(self.read_binary_data(n, 'magy'), cm)
dset += (np.roll(tmp, -1, axis=2) \
- np.roll(tmp, 1, axis=2))
else:
tmp = np.reshape(self.read_binary_data(n, 'magx'), cm)
dset = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'magy'), cm)
dset += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
dset *= 0.5
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
elif var == 'curr':
if nd == 3:
tmp = np.reshape(self.read_binary_data(n, 'magx'), cm)
wy = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
wz = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
tmp = np.reshape(self.read_binary_data(n, 'magy'), cm)
wz += (np.roll(tmp, -1, axis=2) \
- np.roll(tmp, 1, axis=2))
wx = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'magz'), cm)
wx += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
wy += (np.roll(tmp, 1, axis=2) \
- np.roll(tmp, -1, axis=2))
else:
tmp = np.reshape(self.read_binary_data(n, 'magx'), cm)
wz = (np.roll(tmp, 1, axis=0) \
- np.roll(tmp, -1, axis=0))
tmp = np.reshape(self.read_binary_data(n, 'magy'), cm)
wz += (np.roll(tmp, -1, axis=1) \
- np.roll(tmp, 1, axis=1))
tmp = np.reshape(self.read_binary_data(n, 'magz'), cm)
wx = (np.roll(tmp, -1, axis=0) \
- np.roll(tmp, 1, axis=0))
wy = (np.roll(tmp, 1, axis=1) \
- np.roll(tmp, -1, axis=1))
dset = 0.5 * np.sqrt(wx**2 + wy**2 + wz**2)
for p in range(nb):
dset[...,p] /= self.cell_size[level[ids[p]]]
else:
dset = self.read_binary_data(n, var)
dset = np.reshape(dset, cm)
for p in range(nb):
ii = ids[p]
nl = 2**(ml - level[ii])
if nl <= shrink:
method = 'rebin'
else:
method = interpolation
cm = bm[0:nd] * nl // shrink
il = coord[ii][0:nd] * cm[0:nd]
iu = il + cm[0:nd]
if all(iu[:] > ll[:]) and all(il[:] < uu[:]):
nb = il[:] - ll[:]
ne = iu[:] - ll[:]
ib = np.maximum(nb[:], 0)
ie = np.minimum(ne[:], uu[:] - ll[:])
jb = ib[:] - nb[:]
je = ie[:] - ne[:] + cm[:]
if nd == 3:
arr[ib[2]:ie[2],ib[1]:ie[1],ib[0]:ie[0]] = interpolate(dset[:,:,:,p], cm, ng, method=method)[jb[2]:je[2],jb[1]:je[1],jb[0]:je[0]]
else:
arr[ib[1]:ie[1],ib[0]:ie[0]] = interpolate(dset[:,:,p], cm, ng, method=method)[jb[1]:je[1],jb[0]:je[0]]
if progress:
mfile = 'datablocks_{:06d}.xml'.format(n)
m += 1
sys.stdout.write('\r')
sys.stdout.write("Reading '{}' from '{}': block {} from {}".format(var, mfile, m, nm))
sys.stdout.flush()
if (progress):
sys.stdout.write('\n')
sys.stdout.flush()
return arr
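
# Example use of AmunXML.dataset() with its resolution and domain options
# (a sketch only; the snapshot path is hypothetical):
#
#   snapshot = AmunXML('./run/snapshot_00010')
#
#   # Fix the output resolution to refinement level 4, so that snapshots
#   # whose maximum level changes during the evolution share one cube size.
#   dens = snapshot.dataset('dens', maxlev=4)
#
#   # Read only the subdomain [0.25, 0.75] x [0.25, 0.75] of a 2D run,
#   # upscaling coarse blocks with spline interpolation.
#   sub = snapshot.dataset('dens', extent=[0.25, 0.75, 0.25, 0.75],
#                          interpolation='spline')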
def amun_compatible(fname):
'''
Subroutine checks if the HDF5 file is AMUN compatible.
Arguments:
fname - the HDF5 file name;
Return values:
True or False;
Examples:
comp = amun_compatible('p000010_00000.h5')
'''
with h5.File(fname, 'r') as f:
        if 'code' in f.attrs:
            if f.attrs['code'].astype(str) == "AMUN":
                return True
            else:
                print("'%s' contains attribute 'code', but it is not 'AMUN'!" % fname)
                return False
elif 'attributes' in f and 'coordinates' in f and \
'variables' in f:
return True
else:
print("'%s' misses one of these groups:" % fname, \
"'attributes', 'coordinates' or 'variables'!")
return False
def amun_attribute(fname, aname):
'''
Subroutine to read global attributes from AMUN HDF5 snapshots.
Arguments:
fname - the HDF5 file name;
aname - the attribute name;
Return values:
ret - the value of the attribute or None;
Examples:
time = amun_attribute('p000010_00000.h5', 'time')
'''
if not amun_compatible(fname):
return None
with h5.File(fname, 'r') as f:
if aname in f['attributes'].attrs:
attr = f['attributes'].attrs[aname]
if attr.dtype.type is np.string_:
ret = np.squeeze(attr).astype(str)
else:
ret = np.squeeze(attr)
return ret
else:
print("Attribute '%s' cannot be found in '%s'!" % (aname, fname))
return None
def amun_coordinate(fname, iname):
'''
Subroutine to read coordinate items from AMUN HDF5 snapshots.
Arguments:
fname - the HDF5 file name;
iname - the item name;
Return values:
ret - the value of the item or None;
Examples:
bounds = amun_coordinate('p000010_00000.h5', 'bounds')
'''
if not amun_compatible(fname):
return None
with h5.File(fname, 'r') as f:
if iname in f['coordinates']:
return np.array(f['coordinates'][iname])
else:
print("Coordinate item '%s' not found in group 'coordinate' of '%s'!" % (iname, fname))
return None
def amun_dataset(fname, vname, shrink=1, interpolation='rebin', progress=False):
'''
Subroutine to read datasets from AMUN HDF5 snapshots.
Arguments:
fname - the HDF5 file name;
vname - the variable name;
      shrink - the shrink factor (must be a power of 2 and not larger
               than the block size);
      interpolation - the interpolation method passed to interpolate();
      progress - the progress bar switch;
Return values:
ret - the array of values for the variable;
Examples:
dn = amun_dataset('p000010_00000.h5', 'dens')
'''
if not amun_compatible(fname):
return None
dname = op.dirname(fname)
if progress:
sys.stdout.write("Data file path:\n '%s'\n" % (dname))
# get attributes necessary to reconstruct the domain
#
eqsys = amun_attribute(fname, 'eqsys')
eos = amun_attribute(fname, 'eos')
nr = amun_attribute(fname, 'isnap')
nc = amun_attribute(fname, 'nprocs')
nl = amun_attribute(fname, 'nleafs')
if eos == 'adi':
gm = amun_attribute(fname, 'adiabatic_index')
# get block dimensions and the maximum level
#
ndims = amun_attribute(fname, 'ndims')
nn = amun_attribute(fname, 'ncells')
bm = np.array([nn, nn, nn])
if ndims == 2:
bm[2] = 1
ng = amun_attribute(fname, 'nghosts')
ml = amun_attribute(fname, 'maxlev')
# get the base block dimensions
#
rm = amun_attribute(fname, 'bdims')
if rm is None:
rm = amun_attribute(fname, 'domain_base_dims')
if rm is None:
rm = amun_attribute(fname, 'rdims')
if rm is None:
return None
# build the list of supported variables
#
variables = []
with h5.File(fname, 'r') as f:
for var in f['variables'].keys():
variables.append(var)
# add derived variables if possible
#
variables.append('level')
if 'velx' in variables and 'vely' in variables and 'velz' in variables:
variables.append('velo')
variables.append('divv')
variables.append('vort')
if 'magx' in variables and 'magy' in variables and 'magz' in variables:
variables.append('magn')
variables.append('divb')
variables.append('curr')
if (eqsys == 'hd' or eqsys == 'mhd') and eos == 'adi' \
and 'pres' in variables:
variables.append('eint')
if 'dens' in variables and 'pres' in variables:
variables.append('temp')
if (eqsys == 'hd' or eqsys == 'mhd') \
and 'dens' in variables \
and 'velx' in variables \
and 'vely' in variables \
and 'velz' in variables:
variables.append('ekin')
if (eqsys == 'mhd' or eqsys == 'srmhd') \
and 'magx' in variables \
and 'magy' in variables \
and 'magz' in variables:
variables.append('emag')
if eqsys == 'hd' and 'ekin' in variables and 'eint' in variables:
variables.append('etot')
if eqsys == 'mhd' and 'eint' in variables \
and 'ekin' in variables \
and 'emag' in variables:
variables.append('etot')
if (eqsys == 'srhd' or eqsys == 'srmhd') and 'velo' in variables:
variables.append('lore')
# check if the requested variable is in the variable list
#
    if vname not in variables:
        print("The requested variable '%s' cannot be extracted from the file datasets!" % vname)
        return None
# check if the shrink parameter is correct (block dimensions should be
# divisible by the shrink factor)
#
shrink = max(1, int(shrink))
if shrink > 1:
if (nn % shrink) != 0:
print('The block dimension should be divisible by the shrink factor!')
return None
sh = shrink
while(sh > 2 and sh % 2 == 0):
sh = int(sh / 2)
if (sh % 2) != 0:
print('The shrink factor should be a power of 2!')
return None
# determine the actual maximum level from the blocks
#
levs = []
for n in range(nc):
fname = 'p%06d_%05d.h5' % (nr, n)
lname = op.join(dname, fname)
dblocks = amun_attribute(lname, 'dblocks')
if dblocks > 0:
levs = np.append(levs, [amun_coordinate(lname, 'levels')])
ml = int(levs.max())
# prepare dimensions of the output array and allocate it
#
dm = np.array(rm[0:ndims] * bm[0:ndims] * 2**(ml - 1) / shrink, \
dtype=np.int32)
ret = np.zeros(dm[::-1])
# iterate over all subdomain files
#
nb = 0
for n in range(nc):
fname = 'p%06d_%05d.h5' % (nr, n)
lname = op.join(dname, fname)
dblocks = amun_attribute(lname, 'dblocks')
if dblocks > 0:
levels = amun_coordinate(lname, 'levels')
coords = amun_coordinate(lname, 'coords')
dx = amun_coordinate(lname, 'dx')
dy = amun_coordinate(lname, 'dy')
dz = amun_coordinate(lname, 'dz')
with h5.File(lname, 'r') as f:
g = f['variables']
if vname == 'level':
dataset = np.zeros(g[variables[0]].shape)
for l in range(dblocks):
dataset[:,:,:,l] = levels[l]
elif vname == 'velo':
dataset = np.sqrt(g['velx'][:,:,:,:]**2 \
+ g['vely'][:,:,:,:]**2 \
+ g['velz'][:,:,:,:]**2)
elif vname == 'magn':
dataset = np.sqrt(g['magx'][:,:,:,:]**2 \
+ g['magy'][:,:,:,:]**2 \
+ g['magz'][:,:,:,:]**2)
elif vname == 'eint':
dataset = 1.0 / (gm - 1.0) * g['pres'][:,:,:,:]
elif vname == 'ekin':
dataset = 0.5 * g['dens'][:,:,:,:] * (g['velx'][:,:,:,:]**2 \
+ g['vely'][:,:,:,:]**2 \
+ g['velz'][:,:,:,:]**2)
elif vname == 'emag':
dataset = 0.5 * (g['magx'][:,:,:,:]**2 \
+ g['magy'][:,:,:,:]**2 \
+ g['magz'][:,:,:,:]**2)
elif vname == 'etot':
dataset = 1.0 / (gm - 1.0) * g['pres'][:,:,:,:] \
+ 0.5 * g['dens'][:,:,:,:] * (g['velx'][:,:,:,:]**2 \
+ g['vely'][:,:,:,:]**2 \
+ g['velz'][:,:,:,:]**2)
if eqsys == 'mhd':
dataset += 0.5 * (g['magx'][:,:,:,:]**2 \
+ g['magy'][:,:,:,:]**2 \
+ g['magz'][:,:,:,:]**2)
elif vname == 'temp':
dataset = g['pres'][:,:,:,:] / g['dens'][:,:,:,:]
elif vname == 'lore':
dataset = 1.0 / np.sqrt(1.0 - (g['velx'][:,:,:,:]**2 \
+ g['vely'][:,:,:,:]**2 \
+ g['velz'][:,:,:,:]**2))
elif vname == 'divv':
dataset = np.zeros(g['velx'].shape)
fields = [ 'velx', 'vely', 'velz' ]
h = (dx, dy, dz)
for i in range(ndims):
v = fields[i]
                            dataset += 0.5 * (np.roll(g[v][:,:,:,:], -1, axis=2-i) \
                                            - np.roll(g[v][:,:,:,:], 1, axis=2-i)) \
                                            / h[i][levels[:] - 1]
elif vname == 'divb':
dataset = np.zeros(g['magx'].shape)
fields = [ 'magx', 'magy', 'magz' ]
h = (dx, dy, dz)
for i in range(ndims):
v = fields[i]
                            dataset += 0.5 * (np.roll(g[v][:,:,:,:], -1, axis=2-i) \
                                            - np.roll(g[v][:,:,:,:], 1, axis=2-i)) \
                                            / h[i][levels[:] - 1]
elif vname == 'vort':
if ndims == 3:
wx = 0.5 * (np.roll(g['velz'][:,:,:,:], -1, axis=1) \
- np.roll(g['velz'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1] \
- 0.5 * (np.roll(g['vely'][:,:,:,:], -1, axis=0) \
- np.roll(g['vely'][:,:,:,:], 1, axis=0)) \
/ dz[levels[:]-1]
wy = 0.5 * (np.roll(g['velx'][:,:,:,:], -1, axis=0) \
- np.roll(g['velx'][:,:,:,:], 1, axis=0)) \
/ dz[levels[:]-1] \
- 0.5 * (np.roll(g['velz'][:,:,:,:], -1, axis=2) \
- np.roll(g['velz'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1]
wz = 0.5 * (np.roll(g['vely'][:,:,:,:], -1, axis=2) \
- np.roll(g['vely'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1] \
- 0.5 * (np.roll(g['velx'][:,:,:,:], -1, axis=1) \
- np.roll(g['velx'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1]
else:
wx = 0.5 * (np.roll(g['velz'][:,:,:,:], -1, axis=1) \
- np.roll(g['velz'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1]
wy = - 0.5 * (np.roll(g['velz'][:,:,:,:], -1, axis=2) \
- np.roll(g['velz'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1]
wz = 0.5 * (np.roll(g['vely'][:,:,:,:], -1, axis=2) \
- np.roll(g['vely'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1] \
- 0.5 * (np.roll(g['velx'][:,:,:,:], -1, axis=1) \
- np.roll(g['velx'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1]
dataset = np.sqrt(wx * wx + wy * wy + wz * wz)
elif vname == 'curr':
if ndims == 3:
wx = 0.5 * (np.roll(g['magz'][:,:,:,:], -1, axis=1) \
- np.roll(g['magz'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1] \
- 0.5 * (np.roll(g['magy'][:,:,:,:], -1, axis=0) \
- np.roll(g['magy'][:,:,:,:], 1, axis=0)) \
/ dz[levels[:]-1]
wy = 0.5 * (np.roll(g['magx'][:,:,:,:], -1, axis=0) \
- np.roll(g['magx'][:,:,:,:], 1, axis=0)) \
/ dz[levels[:]-1] \
- 0.5 * (np.roll(g['magz'][:,:,:,:], -1, axis=2) \
- np.roll(g['magz'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1]
wz = 0.5 * (np.roll(g['magy'][:,:,:,:], -1, axis=2) \
- np.roll(g['magy'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1] \
- 0.5 * (np.roll(g['magx'][:,:,:,:], -1, axis=1) \
- np.roll(g['magx'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1]
else:
wx = 0.5 * (np.roll(g['magz'][:,:,:,:], -1, axis=1) \
- np.roll(g['magz'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1]
wy = - 0.5 * (np.roll(g['magz'][:,:,:,:], -1, axis=2) \
- np.roll(g['magz'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1]
wz = 0.5 * (np.roll(g['magy'][:,:,:,:], -1, axis=2) \
- np.roll(g['magy'][:,:,:,:], 1, axis=2)) \
/ dx[levels[:]-1] \
- 0.5 * (np.roll(g['magx'][:,:,:,:], -1, axis=1) \
- np.roll(g['magx'][:,:,:,:], 1, axis=1)) \
/ dy[levels[:]-1]
dataset = np.sqrt(wx * wx + wy * wy + wz * wz)
else:
dataset = g[vname][:,:,:,:]
# rescale all blocks to the effective resolution
#
for l in range(dblocks):
nn = 2**(ml - levels[l])
if nn <= shrink:
method = 'rebin'
else:
method = interpolation
cm = np.array(bm[0:ndims] * nn / shrink, dtype=np.int32)
ibeg = coords[0:ndims,l] * cm[0:ndims]
iend = ibeg + cm[0:ndims]
if ndims == 3:
ib, jb, kb = ibeg[0], ibeg[1], ibeg[2]
ie, je, ke = iend[0], iend[1], iend[2]
ret[kb:ke,jb:je,ib:ie] = interpolate(dataset[:,:,:,l], cm, ng, method=method)
else:
ib, jb = ibeg[0], ibeg[1]
ie, je = iend[0], iend[1]
ret[jb:je,ib:ie] = interpolate(dataset[0,:,:,l], cm, ng, method=method)
nb += 1
# print progress bar if desired
#
if progress:
sys.stdout.write('\r')
sys.stdout.write("Reading '%s' from '%s': block %d from %d" \
% (vname, fname, nb, nl))
sys.stdout.flush()
if (progress):
sys.stdout.write('\n')
sys.stdout.flush()
return ret
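
# Example use of amun_dataset() (a sketch; the file name is hypothetical):
#
#   dn = amun_dataset('./run/p000010_00000.h5', 'dens', shrink=2,
#                     interpolation='rebin', progress=True)
#   print(dn.shape, dn.min(), dn.max())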
def amun_integrals(field, filename, pathlist):
'''
    amun_integrals: iterates over the paths in 'pathlist', reading and merging
    the values of column 'field' from the consecutive integral files named
    '<filename>_NN.dat' found in each path.
'''
# Initiate the return values with empty array and file number.
#
vals = np.array([])
num = 1
# Iterate over all paths provided in the list 'pathlist'.
#
for path in pathlist:
# Iterate over all files in the current path.
#
while True:
# Generate file name.
#
            dfile = op.join(path, filename + '_' + str(num).zfill(2) + '.dat')
# Check if the file exists.
#
if op.isfile(dfile):
# Read values from the current integrals file.
#
lvals = read_integrals(dfile, field)
# Append to the return array.
#
vals = np.append(vals, lvals)
                # Increase the file number.
                #
                num = num + 1
else:
                # File does not exist, so go to the next path.
#
break
# Return appended values.
#
return vals
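
# Example: merge the 'time' column from integral files spread over several
# run directories (hypothetical paths; the files are expected to be named
# '<filename>_01.dat', '<filename>_02.dat', and so on in each directory):
#
#   t = amun_integrals('time', 'integrals', ['./run1', './run2'])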
def read_integrals(filename, column):
'''
read_integrals: reads a given column from an integral file.
'''
    # Open the given file.
    #
    f = open(filename, 'r')
    # Read first line and store it in h, since it will be used to obtain the
    # column headers.
    #
l = f.readline()
h = l
# Read first line which is not comment in order to determine the number of
# columns and store the number of columns in nc. Calculate the column width
# and store it in wc.
#
while l.startswith('#'):
l = f.readline()
nc = len(l.rsplit())
wc = int((len(l) - 9) / (nc - 1))
# Split header line into a list.
#
lh = [h[1:9].strip()]
for i in range(nc - 1):
ib = i * wc + 10
ie = ib + wc - 1
lh.append(h[ib:ie].strip())
    # Find and read the given column.
    #
    if column not in lh:
        f.close()
        print("Column '%s' not found in '%s'!" % (column, filename))
        return None
    ic = lh.index(column)
    lc = [float(l.split()[ic])]
    for l in f:
        lc.append(float(l.split()[ic]))
# Close the file.
#
f.close()
# Return values.
#
return(np.array(lc))
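
# read_integrals() assumes a fixed-width text table whose first line is a
# '#' comment holding the column headers, for example (layout hypothetical):
#
#   #   step         time           dt ...
#        100   1.0000E+00   1.0000E-03 ...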
def rebin(a, newshape):
'''
    Subroutine changes the size of the input array to the new shape,
by copying cells or averaging them.
'''
assert len(a.shape) == len(newshape)
m = a.ndim - 1
if (a.shape[m] > newshape[m]):
if a.ndim == 3:
nn = [newshape[0], int(a.shape[0] / newshape[0]),
newshape[1], int(a.shape[1] / newshape[1]),
newshape[2], int(a.shape[2] / newshape[2])]
return a.reshape(nn).mean(5).mean(3).mean(1)
else:
nn = [newshape[0], int(a.shape[0] / newshape[0]),
newshape[1], int(a.shape[1] / newshape[1])]
return a.reshape(nn).mean(3).mean(1)
else:
for n in range(a.ndim):
a = np.repeat(a, int(newshape[n] / a.shape[n]), axis=n)
return(a)
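
# A quick illustration of rebin() (shapes only): downsampling averages blocks
# of cells, upsampling repeats them.
#
#   a = np.arange(16.0).reshape([4, 4])
#   rebin(a, [2, 2])    # 2x2 array of 2x2-block averages
#   rebin(a, [8, 8])    # 8x8 array, each cell repeated 2x2 times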
def interpolate(a, newshape, nghosts, method = 'rebin'):
'''
Subroutine rescales the block by interpolating its values.
'''
if (method == 'rebin' or not scipy_available):
# calculate the indices in order to remove the ghost zones
#
if a.ndim == 3:
ib = nghosts
jb = nghosts
kb = nghosts
ie = a.shape[2] - nghosts
je = a.shape[1] - nghosts
ke = a.shape[0] - nghosts
if (a.shape[0] == 1):
kb = 0
ke = 1
return rebin(a[kb:ke,jb:je,ib:ie], newshape)
else:
ib = nghosts
jb = nghosts
ie = a.shape[1] - nghosts
je = a.shape[0] - nghosts
return rebin(a[jb:je,ib:ie], newshape)
elif (method == 'monotonic' or method == 'pchip'):
dims = np.arange(a.ndim)
q = a
for n in dims:
d2 = np.roll(q,-1, axis=0) + np.roll(q, 1, axis=0) - 2.0 * q
q = q - d2 / 24.0
d = np.array(q.shape)
xo = (np.arange(0.5, a.shape[n]) - nghosts) / (a.shape[n] - 2 * nghosts)
xn = np.arange(0.5, newshape[n]) / newshape[n]
u = q.reshape([d[0], int(q.size / d[0])])
f = np.zeros([newshape[n], int(q.size / d[0])])
for i in range(int(q.size / d[0])):
f[:,i] = pchip_interpolate(xo, u[:,i], xn)
d[0] = newshape[n]
f = f.reshape(d)
q = f.transpose(np.roll(dims, -1))
return q
elif (method == 'spline'):
dims = np.arange(a.ndim)
q = a
for n in dims:
d2 = np.roll(q,-1, axis=0) + np.roll(q, 1, axis=0) - 2.0 * q
q = q - d2 / 24.0
d = np.array(q.shape)
xo = (np.arange(0.5, a.shape[n]) - nghosts) / (a.shape[n] - 2 * nghosts)
xn = np.arange(0.5, newshape[n]) / newshape[n]
u = q.reshape([d[0], int(q.size / d[0])])
f = np.zeros([newshape[n], int(q.size / d[0])])
for i in range(int(q.size / d[0])):
t = splrep(xo, u[:,i], k=5, s=0.0)
f[:,i] = splev(xn, t)
d[0] = newshape[n]
f = f.reshape(d)
q = f.transpose(np.roll(dims, -1))
return q
else:
dims = np.arange(a.ndim)
q = a
for n in dims:
d2 = np.roll(q,-1, axis=0) + np.roll(q, 1, axis=0) - 2.0 * q
q = q - d2 / 24.0
d = np.array(q.shape)
xo = (np.arange(0.5, a.shape[n]) - nghosts) / (a.shape[n] - 2 * nghosts)
xn = np.arange(0.5, newshape[n]) / newshape[n]
u = q.reshape([d[0], int(q.size / d[0])])
f = np.zeros([newshape[n], int(q.size / d[0])])
for i in range(int(q.size / d[0])):
t = interp1d(xo, u[:,i], kind=method)
f[:,i] = t(xn)
d[0] = newshape[n]
f = f.reshape(d)
q = f.transpose(np.roll(dims, -1))
return q
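
# Note on interpolate(): before resampling, each block is corrected by
# subtracting d2/24, an approximate conversion from cell averages to
# cell-centre values; the block is then rescaled axis by axis. Method
# 'rebin' (the default, and the fallback when SciPy is missing) averages or
# repeats cells, 'monotonic'/'pchip' uses pchip_interpolate(), 'spline' uses
# a quintic B-spline (splrep/splev with k=5), and any other value is passed
# to scipy.interpolate.interp1d() as its 'kind'.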
if __name__ == "__main__":
fname = './p000030_00000.h5'
ret = amun_attribute(fname, 'time')
print(ret)
ret = amun_attribute(fname, 'dims')
print(ret)
ret = amun_dataset(fname, 'dens')
print(ret.shape, ret.min(), ret.max())