"Fossies" - the Fresh Open Source Software Archive

Member "veusz-3.1/veusz/dataimport/fits_hdf5_helpers.py" (11 Sep 2017, 8663 Bytes) of package /linux/privat/veusz-3.1.tar.gz:


#    Copyright (C) 2017 Jeremy S. Sanders
#    Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
#    This program is free software; you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation; either version 2 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License along
#    with this program; if not, write to the Free Software Foundation, Inc.,
#    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################

from __future__ import division, print_function, absolute_import

import sys
import ast
import re
import numpy as N

from .. import qtall as qt

def _(text, disambiguation=None, context="Import_FITS_HDF5"):
    return qt.QCoreApplication.translate(context, text, disambiguation)

def filterAttrsByName(attrs, name):
    """For compound datasets, attributes can be given on a per-column basis.
    This filters the attributes by the column name."""

    name = name.strip()
    attrsout = {}
    for a in attrs:
        # attributes named vsz_*_<column name> are copied, with the
        # column-name suffix stripped
        if a[:4] == "vsz_" and a[-len(name)-1:] == "_"+name:
            attrsout[a[:-len(name)-1]] = attrs[a]
    return attrsout

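# Hypothetical usage sketch (not from the veusz sources): the attribute and
# column names below are invented to show how per-column attributes are
# selected and how the column suffix is stripped.
def _example_filterAttrsByName():
    attrs = {
        'vsz_range_temp': (0., 10.),   # applies to column "temp"
        'vsz_range_flux': (1., 2.),    # applies to column "flux"
        'units': 'K',                  # no vsz_ prefix: ignored
    }
    # returns {'vsz_range': (0., 10.)} - the "_temp" suffix is removed
    return filterAttrsByName(attrs, 'temp')
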
def convertTextToSlice(slicetxt, numdims):
    """Convert a value like 0:1:3,:,::-1 to a tuple slice
    ((0,1,3), (None, None, None), (None, None, -1))
    or reduce dimensions such as :,3 -> ((None,None,None),3)

    Also checks number of dimensions (including reduced) is numdims.

    Return -1 on error
    """

    if slicetxt.strip() == '':
        return None

    slicearray = slicetxt.split(',')
    if len(slicearray) != numdims:
        # slice needs same dimensions as data
        return -1

    allsliceout = []
    for sliceap_idx, sliceap in enumerate(slicearray):
        sliceparts = sliceap.strip().split(':')

        if len(sliceparts) == 1:
            # reduce dimensions with single index
            try:
                allsliceout.append(int(sliceparts[0]))
            except ValueError:
                # invalid index
                return -1
        elif len(sliceparts) not in (2, 3):
            return -1
        else:
            sliceout = []
            for p in sliceparts:
                p = p.strip()
                if not p:
                    sliceout.append(None)
                else:
                    try:
                        sliceout.append(int(p))
                    except ValueError:
                        return -1
            if len(sliceout) == 2:
                sliceout.append(None)
            allsliceout.append(tuple(sliceout))

    allempty = True
    for s in allsliceout:
        if s != (None, None, None):
            allempty = False
    if allempty:
        return None

    return tuple(allsliceout)

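# Hypothetical usage sketch (not from the veusz sources), showing the
# text-to-slice-tuple conversion described in the docstring above.
def _example_convertTextToSlice():
    # mixed ranges and a reversed axis for 3-dimensional data
    assert convertTextToSlice('0:10:2, :, ::-1', 3) == (
        (0, 10, 2), (None, None, None), (None, None, -1))
    # a bare index reduces that dimension
    assert convertTextToSlice(':, 3', 2) == ((None, None, None), 3)
    # all-':' slices mean "take everything" and are returned as None
    assert convertTextToSlice(':, :', 2) is None
    # wrong number of dimensions gives the error value -1
    assert convertTextToSlice('1:5', 2) == -1
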
def convertSliceToText(slice):
    """Convert tuple slice into text."""
    if slice is None:
        return ''
    out = []
    for spart in slice:
        if isinstance(spart, int):
            # single index
            out.append(str(spart))
            continue

        sparttxt = []
        for p in spart:
            if p is not None:
                sparttxt.append(str(p))
            else:
                sparttxt.append('')
        if sparttxt[-1] == '':
            del sparttxt[-1]
        out.append(':'.join(sparttxt))
    return ', '.join(out)

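# Hypothetical usage sketch (not from the veusz sources): converting a slice
# tuple back to the text form a user would type.
def _example_convertSliceToText():
    assert convertSliceToText(
        ((0, 10, 2), (None, None, None), 3)) == '0:10:2, :, 3'
    assert convertSliceToText(None) == ''
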
def applySlices(data, slices):
    """Given hdf/numpy dataset, apply slicing tuple to it and return data."""
    slist = []
    for s in slices:
        if isinstance(s, int):
            slist.append(s)
        else:
            slist.append(slice(*s))
            if s[2] is not None and s[2] < 0:
                # negative slicing doesn't work in h5py, so we
                # make a copy
                data = N.array(data)
    try:
        data = data[tuple(slist)]
    except (ValueError, IndexError):
        data = N.array([], dtype=N.float64)
    return data

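# Hypothetical usage sketch (not from the veusz sources): applying a slice
# tuple (as produced by convertTextToSlice) to a plain numpy array, which
# stands in for an h5py/FITS dataset here.
def _example_applySlices():
    data = N.arange(12).reshape(3, 4)
    # equivalent of the text "0:2, ::-1": rows 0-1, columns reversed
    result = applySlices(data, ((0, 2, None), (None, None, -1)))
    assert result.shape == (2, 4)
    assert result[0, 0] == 3
    return result
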
class ConvertError(RuntimeError):
    pass

def convertFromBytes(s):
    """h5py often returns bytes instead of unicode.
    Decode the value if it is bytes.
    """
    if sys.version_info[0] == 3:
        if isinstance(s, bytes):
            return s.decode('utf-8')
    return s

def convertDatasetToObject(data, slices):
    """Convert numpy/hdf dataset to suitable data for veusz.
    Raise ConvertError if this is not possible."""

    # lazily-loaded h5py
    try:
        from h5py import check_dtype
    except ImportError:
        # fallback if h5py is not installed, e.g. only FITS support in use
        def check_dtype(vlen=None):
            return False

    if slices:
        data = applySlices(data, slices)

    try:
        kind = data.dtype.kind
    except TypeError:
        raise ConvertError(_("Could not get data type of dataset"))

    if kind in ('b', 'i', 'u', 'f'):
        data = N.array(data, dtype=N.float64)
        if data.ndim == 0:
            raise ConvertError(_("Dataset has no dimensions"))
        return data

    elif kind in ('S', 'a', 'U') or (
        kind == 'O' and check_dtype(vlen=data.dtype) is str):
        if hasattr(data, 'ndim') and data.ndim != 1:
            raise ConvertError(_("Text datasets must have 1 dimension"))

        strcnv = list(data)
        return strcnv

    raise ConvertError(_("Dataset has an invalid type"))

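# Hypothetical usage sketch (not from the veusz sources): plain numpy arrays
# stand in for h5py/FITS datasets.  Numeric data comes back as float64;
# 1D string data comes back as a list.
def _example_convertDatasetToObject():
    numeric = N.arange(6).reshape(2, 3)
    # slice with the equivalent of "0:1, :"
    out = convertDatasetToObject(numeric, ((0, 1, None), (None, None, None)))
    assert out.shape == (1, 3) and out.dtype == N.float64

    text = N.array(['alpha', 'beta'])
    assert convertDatasetToObject(text, None) == ['alpha', 'beta']
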
def getFITSHduNames(fitsfile):
    """Return list of names to give HDUs given a FITS file."""

    nameset = set()
    names = []

    for i, hdu in enumerate(fitsfile):
        name = hdu.name

        if not name:
            name = 'hdu%i' % i
            # just in case people start naming HDUs hduX...
            while name in nameset:
                name += '~'
        else:
            name = name.lower()

            # EXTVER distinguishes identical names
            if 'EXTVER' in hdu.header:
                name = '%s%i' % (name, hdu.header['EXTVER'])

            # prevent duplicates by appending a counter
            if name in nameset:
                suffix = 2
                while name+str(suffix) in nameset:
                    suffix += 1
                name += str(suffix)

        nameset.add(name)
        names.append(name)

    return names

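# Hypothetical usage sketch (not from the veusz sources).  Real callers pass
# an astropy/pyfits HDU list; a minimal stand-in providing only the
# attributes used above (.name and .header) behaves the same way.
def _example_getFITSHduNames():
    class FakeHDU(object):
        def __init__(self, name, header=None):
            self.name = name
            self.header = header or {}

    hdus = [
        FakeHDU(''),                         # unnamed -> 'hdu0'
        FakeHDU('EVENTS', {'EXTVER': 2}),    # EXTVER appended -> 'events2'
        FakeHDU('SPECTRUM'),                 # -> 'spectrum'
        FakeHDU('SPECTRUM'),                 # duplicate -> 'spectrum2'
    ]
    return getFITSHduNames(hdus)
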
def convertFITSDataFormat(fmt):
    """Convert FITS TFORM codes into:

    (code, nlen)

    code is 'invalid', 'numeric' or 'text'
    nlen is number of entries for column
    """

    # match the fits format text code [r]F[w[.d]]
    m = re.match(r'^([0-9]*)([A-Za-z])([0-9]*)(\.[0-9]+)?$', fmt)
    if not m:
        # unparseable format code
        return 'invalid', 0
    grps = m.groups()

    # length of array
    try:
        nlen = int(grps[0])
    except ValueError:
        nlen = 1
    fcode = grps[1].upper()
    width = grps[2]

    if fcode == 'A' and not width:
        code = 'text'
        # even though strings are N characters, they are handled as singles
        nlen = 1

    elif fcode in 'LXBIJKED':
        code = 'numeric'

    else:
        # this includes string arrays ('rAw'), which cannot be handled as
        # 2d text arrays, so they are treated as invalid
        code = 'invalid'

    return code, nlen

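# Hypothetical usage sketch (not from the veusz sources): typical binary-table
# TFORM values and how they are classified.
def _example_convertFITSDataFormat():
    assert convertFITSDataFormat('E') == ('numeric', 1)     # single float
    assert convertFITSDataFormat('10D') == ('numeric', 10)  # array of 10 doubles
    assert convertFITSDataFormat('16A') == ('text', 1)      # 16-character string
    # string arrays ('rAw') are not supported and classified as invalid
    assert convertFITSDataFormat('60A12')[0] == 'invalid'
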
def hduVeuszAttrs(hdu):
    """Get veusz-specific attributes from a HDU header.

    These use the VEUSZ keyword and have the format
      KEY=VALUE
    or
      COLUMN: KEY=VALUE

    Returns (attrs, colattrs), where attrs is HDU-specific and colattrs
    is column-specific (a dict of dicts).
    """

    attrs = {}
    colattrs = {}

    for k, v in hdu.header.items():
        if k.lower() == 'veusz':

            # match syntax [OPTIONAL COLUMN:] KEY=VALUE
            match = re.match(
                r'^(?:([a-zA-Z0-9_]+)[ ]*:)?[ ]*([a-zA-Z0-9_]+)[ ]*=[ ]*(.*)$',
                v)
            if not match:
                continue

            col, key, tval = match.groups()
            key = key.lower()
            tval = tval.strip()

            # convert to python type if possible
            try:
                val = ast.literal_eval(tval)
            except Exception:
                val = tval

            if col:
                col = col.lower()

                # column-specific key value
                if col not in colattrs:
                    colattrs[col] = {key: val}
                else:
                    colattrs[col][key] = val
            else:
                # hdu-specific key value
                attrs[key] = val

    return attrs, colattrs
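
# Hypothetical usage sketch (not from the veusz sources), using minimal
# stand-ins for an HDU and its header; real callers pass an astropy/pyfits
# HDU whose header may repeat the VEUSZ keyword.
def _example_hduVeuszAttrs():
    class FakeHeader(list):
        def items(self):
            # FITS headers can repeat keywords, so keep a plain card list
            return list(self)

    class FakeHDU(object):
        def __init__(self, cards):
            self.header = FakeHeader(cards)

    hdu = FakeHDU([
        ('VEUSZ', 'range = [0, 10]'),     # HDU-wide attribute
        ('VEUSZ', 'flux: units = "Jy"'),  # attribute for column "flux"
        ('TELESCOP', 'XMM'),              # ignored: not a VEUSZ keyword
    ])
    return hduVeuszAttrs(hdu)
    # -> ({'range': [0, 10]}, {'flux': {'units': 'Jy'}})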