Diffstat (limited to 'astro')
-rwxr-xr-x  astro/add_xflt.py                   159
-rwxr-xr-x  astro/chandra/blanksky_add_time.py   91
-rwxr-xr-x  astro/fits/merge_fits.py            234
-rwxr-xr-x  astro/fitting/fit_betasbp_cut.py    458
-rwxr-xr-x  astro/lc_clean.py                   151
-rwxr-xr-x  astro/marx/marx_pntsrc.py           121
-rwxr-xr-x  astro/marx/randpoints.py            327
-rwxr-xr-x  astro/query_ned.py                  137
-rwxr-xr-x  astro/query_simbad.py               139
9 files changed, 1817 insertions, 0 deletions
diff --git a/astro/add_xflt.py b/astro/add_xflt.py
new file mode 100755
index 0000000..8a718e6
--- /dev/null
+++ b/astro/add_xflt.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/06/16
+
+"""
+Add XFLT#### keywords to the spectrum header according to the provided
+region, in order to employ the "PROJCT" model in XSPEC.
+"""
+
+__version__ = "0.1.0"
+__date__ = "2015-11-14"
+
+import sys
+import argparse
+import subprocess
+import re
+import os
+from collections import OrderedDict
+
+
+def parse_region(regstr):
+ """
+ Parse the given region string into one of the following 4 cases:
+ 1. annulus
+ 2. pie (cxc)
+ 3. pie + annulus (ftools/xmm)
+ 4. other
+
+ For the first 3 cases, the return is a dictionary like:
+ { 'shape': 'pie', 'xc': 55, 'yc': 89,
+ 'radius_in': 50, 'radius_out': 100,
+ 'angle_begin': 30, 'angle_end': 120 }
+ Otherwise, return None.
+ """
+ re_annulus = re.compile(r'^.*(?P<shape>annulus)\(\s*(?P<xc>[\d.-]+)\s*,\s*(?P<yc>[\d.-]+)\s*,\s*(?P<radius_in>[\d.-]+)\s*,\s*(?P<radius_out>[\d.-]+)\s*\).*$', re.I)
+ re_pie_cxc = re.compile(r'^.*(?P<shape>pie)\(\s*(?P<xc>[\d.-]+)\s*,\s*(?P<yc>[\d.-]+)\s*,\s*(?P<radius_in>[\d.-]+)\s*,\s*(?P<radius_out>[\d.-]+)\s*,\s*(?P<angle_begin>[\d.-]+)\s*,\s*(?P<angle_end>[\d.-]+)\s*\).*$', re.I)
+ re_pie_ft = re.compile(r'^.*(?P<shape>pie)\(\s*(?P<xc>[\d.-]+)\s*,\s*(?P<yc>[\d.-]+)\s*,\s*(?P<angle_begin>[\d.-]+)\s*,\s*(?P<angle_end>[\d.-]+)\s*\).*$', re.I)
+ m_annulus = re_annulus.match(regstr)
+ m_pie_cxc = re_pie_cxc.match(regstr)
+ m_pie_ft = re_pie_ft.match(regstr)
+ if m_pie_cxc is not None:
+ # case 2: pie (cxc)
+ region = OrderedDict([
+ ('shape', m_pie_cxc.group('shape').lower()),
+ ('xc', float(m_pie_cxc.group('xc'))),
+ ('yc', float(m_pie_cxc.group('yc'))),
+ ('radius_in', float(m_pie_cxc.group('radius_in'))),
+ ('radius_out', float(m_pie_cxc.group('radius_out'))),
+ ('angle_begin', float(m_pie_cxc.group('angle_begin'))),
+ ('angle_end', float(m_pie_cxc.group('angle_end')))
+ ])
+ elif m_pie_ft is not None:
+ # pie (ftools/xmm)
+ if m_annulus is not None:
+ # case 3: pie + annulus (ftools/xmm)
+ region = OrderedDict([
+ ('shape', m_pie_ft.group('shape').lower()),
+ ('xc', float(m_pie_ft.group('xc'))),
+ ('yc', float(m_pie_ft.group('yc'))),
+ ('radius_in', float(m_annulus.group('radius_in'))),
+ ('radius_out', float(m_annulus.group('radius_out'))),
+ ('angle_begin', float(m_pie_ft.group('angle_begin'))),
+ ('angle_end', float(m_pie_ft.group('angle_end')))
+ ])
+ else:
+ region = None
+ elif m_annulus is not None:
+ # case 1: annulus
+ region = OrderedDict([
+ ('shape', m_annulus.group('shape').lower()),
+ ('xc', float(m_annulus.group('xc'))),
+ ('yc', float(m_annulus.group('yc'))),
+ ('radius_in', float(m_annulus.group('radius_in'))),
+ ('radius_out', float(m_annulus.group('radius_out')))
+ ])
+ else:
+ region = None
+ return region
+
+
+def make_xflt(region):
+ """
+ Make a dictionary for the XFLT#### keywords and values according
+ to the provided region.
+
+ Return:
+ a dictionary containing the XFLT#### keywords and values, e.g.,
+ { 'XFLT0001': radius_out, 'XFLT0002': radius_out, 'XFLT0003': 0,
+ 'XFLT0004': angle_begin, 'XFLT0005': angle_end }
+ """
+ if region.get('shape') == 'annulus':
+ xflt = OrderedDict([
+ ('XFLT0001', region.get('radius_out')),
+ ('XFLT0002', region.get('radius_out')),
+ ('XFLT0003', 0)
+ ])
+ elif region.get('shape') == 'pie':
+ xflt = OrderedDict([
+ ('XFLT0001', region.get('radius_out')),
+ ('XFLT0002', region.get('radius_out')),
+ ('XFLT0003', 0),
+ ('XFLT0004', region.get('angle_begin')),
+ ('XFLT0005', region.get('angle_end'))
+ ])
+ else:
+ xflt = None
+ return xflt
+
+
+def add_xflt(fitsfile, xflt):
+ """
+ Add XFLT#### keywords to the given FITS file.
+ """
+ if xflt is not None:
+ for key, val in xflt.items():
+ cmd = 'fthedit "%(file)s+1" keyword="%(key)s" operation=add value="%(val)s"' % \
+ {'file': fitsfile, 'key': key, 'val': val}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Add XFLT???? keywords to spectrum header")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("spectrum", help="input spectrum; @stack")
+ parser.add_argument("region", help="extraction region of this spectrum; @stack")
+ parser.add_argument("arcmin2pix", nargs='?', help="1 arcmin = ? pixel",
+ default=1.0, type=float)
+ args = parser.parse_args()
+
+ if args.spectrum[0] == '@' and args.region[0] == '@':
+ spectrum = map(str.strip, open(args.spectrum[1:]).readlines())
+ regionstr = map(str.strip, open(args.region[1:]).readlines())
+ else:
+ spectrum = [ args.spectrum ]
+ regionstr = [ args.region ]
+
+ for spec, reg in zip(spectrum, regionstr):
+ print("SPECTRUM: '%s'" % spec)
+ print("REGION: '%s'" % reg)
+ region = parse_region(reg)
+ if region is None:
+ print("ERROR: invalid region %s" % reg, file=sys.stderr)
+ sys.exit(11)
+ else:
+ # Convert pixel to arcmin
+ region['radius_in'] = region['radius_in'] / args.arcmin2pix
+ region['radius_out'] = region['radius_out'] / args.arcmin2pix
+ xflt = make_xflt(region)
+ add_xflt(spec, xflt)
+
+
+if __name__ == "__main__":
+ main()
+
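
A quick interactive check of the above region parsing and XFLT mapping
(a sketch; the pie region string is a made-up example, and the script
must be importable as a module from the working directory):

    >>> from add_xflt import parse_region, make_xflt
    >>> region = parse_region("pie(4096.5,4096.5,50,100,30,120)")
    >>> region['shape'], region['radius_in'], region['radius_out']
    ('pie', 50.0, 100.0)
    >>> make_xflt(region)
    OrderedDict([('XFLT0001', 100.0), ('XFLT0002', 100.0), ('XFLT0003', 0), ('XFLT0004', 30.0), ('XFLT0005', 120.0)])
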
diff --git a/astro/chandra/blanksky_add_time.py b/astro/chandra/blanksky_add_time.py
new file mode 100755
index 0000000..8db3c2b
--- /dev/null
+++ b/astro/chandra/blanksky_add_time.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/06/16
+#
+# Changelogs:
+# 0.2.0, 2015/06/16, Aaron LI
+#   * append the new time column at the *end*, rather than inserting it
+#     at the beginning
+# * explicitly update header from the new generated table
+#
+# BUGS:
+#   * column comments will be lost after modification by astropy.io.fits,
+#     which is a bug of that package
+#
+
+"""
+Add a time column for a Chandra blanksky event file.
+The time data are generated with a uniform distribution
+between TSTART and TSTOP.
+"""
+
+__version__ = "0.2.0"
+__date__ = "2015/06/16"
+
+import sys
+import argparse
+
+import numpy as np
+try:
+ from astropy.io import fits
+except ImportError:
+ try:
+ import pyfits as fits
+ except ImportError:
+ raise ImportError("cannot import 'astropy.io.fits' or 'pyfits'")
+
+
+def add_time_column(fitsfile, blockname="EVENTS"):
+ """
+ Add a time column to the specified block of the input fits file.
+ The time data are generated with a uniform distribution
+ between TSTART and TSTOP.
+
+ Return:
+ A fits object with the new time column.
+ """
+ if isinstance(fitsfile, str):
+ fitsfile = fits.open(fitsfile)
+ table = fitsfile[blockname]
+ tstart = table.header["TSTART"]
+ tstop = table.header["TSTOP"]
+ counts = len(table.data)
+ time_data = np.random.uniform(tstart, tstop, counts)
+ time_col = fits.Column(name="time", format="1D", unit="s", array=time_data)
+    # NOTE: append the new time column at the *end*!
+    # Otherwise the TLMIN??/TLMAX?? keyword pairs, which record the
+    # minimum/maximum values of the corresponding columns, will become
+    # *out of order*, and the output FITS file causes weird problems
+    # with DS9 and DM tools.
+ newtable = fits.BinTableHDU.from_columns(
+ table.columns + fits.ColDefs([time_col]))
+ fitsfile[blockname].data = newtable.data
+ # update header
+ fitsfile[blockname].header.update(newtable.header)
+ return fitsfile
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Add a time column for Chandra blanksky event file.")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("infile", help="input chandra blanksky file")
+ parser.add_argument("outfile", nargs="?", default=None,
+ help="modified blanksky file. IN-PLACE modification if omitted.")
+ parser.add_argument("-C", "--clobber", dest="clobber",
+ action="store_true", help="overwrite output file if exists")
+ args = parser.parse_args()
+
+ newfits = add_time_column(args.infile)
+ if args.outfile:
+ newfits.writeto(args.outfile, clobber=args.clobber)
+ else:
+ newfits.writeto(args.infile, clobber=True)
+
+
+if __name__ == "__main__":
+ main()
+
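
The column-appending logic above can be exercised standalone; a minimal
sketch (the 'energy' column and the TSTART/TSTOP values are made up):

    import numpy as np
    from astropy.io import fits

    # build a toy EVENTS-like table with 100 rows
    energy = fits.Column(name="energy", format="1D", unit="eV",
                         array=np.random.uniform(500, 7000, 100))
    hdu = fits.BinTableHDU.from_columns([energy])
    hdu.header["TSTART"], hdu.header["TSTOP"] = 0.0, 1000.0
    # draw uniform random times and append the column at the *end*
    time_data = np.random.uniform(hdu.header["TSTART"],
                                  hdu.header["TSTOP"], len(hdu.data))
    time_col = fits.Column(name="time", format="1D", unit="s",
                           array=time_data)
    newhdu = fits.BinTableHDU.from_columns(
        hdu.columns + fits.ColDefs([time_col]))
    print(newhdu.columns.names)   # ['energy', 'time']
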
diff --git a/astro/fits/merge_fits.py b/astro/fits/merge_fits.py
new file mode 100755
index 0000000..e014c69
--- /dev/null
+++ b/astro/fits/merge_fits.py
@@ -0,0 +1,234 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/06/16
+#
+# Changelogs:
+# 0.3.0, 2015/06/17, Aaron LI
+# * added argument '-c/--columns' to specify columns to be merged
+# * added argument 'columns' to function 'merge2fits()'
+# 0.2.0, 2015/06/17, Aaron LI
+# * added function 'del_key_startswith()' to delete header keywords,
+# and the deletion must be repeated until the header length does not
+# decrease any more
+#   * ignore the header of the second FITS file to avoid keyword conflicts
+#
+# BUGS:
+# * astropy.io.fits may have bug with header keywords deletion
+#
+# TODO:
+# * to support image FITS merge
+# * to allow specify column list to be merged
+#
+
+"""
+Merge several (>=2) FITS files.
+
+By default the *first* extension tables are merged and written out to a
+new FITS file containing the *common* columns. If the data types of the
+columns of each FITS table do not match, then the data type of the column
+of the *first* FITS table is used, and the other columns are coerced.
+
+If the FITS files have only *1* HDU (i.e., the primary HDU), then the data
+of these HDUs are summed up to make the output FITS file (an image),
+on condition that the shapes of all these HDUs are the same.
+"""
+
+__version__ = "0.3.0"
+__date__ = "2015/06/17"
+
+# default blockname to be merged
+BLOCKNAME_DFT = "EVENTS"
+
+DEBUG = True
+
+import sys
+import argparse
+import re
+
+import numpy as np
+try:
+ from astropy.io import fits
+except ImportError:
+ try:
+ import pyfits as fits
+ except ImportError:
+ raise ImportError("cannot import 'astropy.io.fits' or 'pyfits'")
+
+
+def merge2fits(file1, file2, block1=1, block2=1, columns=None):
+ """
+ Merge *two* FITS files of the given blocks (extension table),
+ and return the merged FITS object.
+
+ TODO:
+ * log history to header
+
+ Arguments:
+ file1, file2: input two FITS files
+ block1, block2: table number or table name to be merged
+ columns: the columns to be merged; by default to merge the
+ common columns
+
+ Return:
+ the merged FITS object
+ """
+ # open file if provide filename
+ if isinstance(file1, str):
+ file1 = fits.open(file1)
+ if isinstance(file2, str):
+ file2 = fits.open(file2)
+ # if has only *1* HDU => image
+ if len(file1) == 1:
+ block1 = 0
+ if len(file2) == 1:
+ block2 = 0
+ if block1 == 0 or block2 == 0:
+ # TODO
+ raise NotImplementedError("image FITS merge currently not supported!")
+ # get table to be merged
+ table1 = file1[block1]
+ table2 = file2[block2]
+ # create column names to be merged
+ # get names of all columns (convert to upper case)
+ colnames1 = [col.name.upper() for col in table1.columns]
+ colnames2 = [col.name.upper() for col in table2.columns]
+ colnames_common = list(set(colnames1).intersection(set(colnames2)))
+    # sort the common column names according to the original column order
+    colnames_common.sort(key=lambda x: colnames1.index(x))
+ if columns is not None:
+ if isinstance(columns, list):
+ columnlist = list(map(str.upper, columns))
+ else:
+            columnlist = columns.upper().replace(",", " ").split()
+ # check the specified columns whether in the above colnames_common
+ for name in columnlist:
+ if name not in colnames_common:
+ raise ValueError("column '%s' not found in both files" % name)
+ # use the specified columns
+ colnames_common = columnlist
+ # "STATUS" columns don't have equal-length format, so remove it
+ if "STATUS" in colnames_common:
+ colnames_common.remove("STATUS")
+ if DEBUG:
+ print("DEBUG: columns to merge: ", colnames_common, file=sys.stderr)
+ # filter out the common columns
+ nrow1 = table1.data.shape[0]
+ nrow2 = table2.data.shape[0]
+ hdu_merged = fits.BinTableHDU.from_columns(
+ fits.ColDefs([table1.columns[name] for name in colnames_common]),
+ nrows=nrow1+nrow2)
+ for name in colnames_common:
+ if DEBUG:
+ print("DEBUG: merging column: ", name, file=sys.stderr)
+ dtype = hdu_merged.columns[name].array.dtype
+ hdu_merged.columns[name].array[nrow1:] = \
+ table2.columns[name].array.astype(dtype)
+ # process headers, based on the header of the first FITS file
+ # DO NOT strip the base header, in order to keep the position of
+ # XTENSION/BITPIX/NAXIS/NAXIS1/NAXIS2/PCOUNT/GCOUNT/TFIELDS keywords.
+ header = table1.header.copy() # do not strip
+    # IGNORE the header of the second FITS file to avoid keyword conflicts.
+ #header2 = table2.header.copy(strip=True)
+ ## merge two headers; COMMENT and HISTORY needs special handle
+ #for comment in header2["COMMENT"]:
+ # header.add_comment(comment)
+ #for history in header2["HISTORY"]:
+ # header.add_history(history)
+ #if "COMMENT" in header2:
+ # del header2["COMMENT"]
+ #if "HISTORY" in header2:
+ # del header2["HISTORY"]
+ #if "" in header2:
+ # del header2[""]
+ #header.update(header2)
+ # remove the original TLMIN??/TLMAX??/TTYPE??/TFORM??/TUNIT?? keywords
+ del_key_startswith(header,
+ startswith=["TLMIN", "TLMAX", "TTYPE", "TFORM", "TUNIT"],
+ lastlength=len(header))
+ # update with new TLMIN??/TLMAX??/TTYPE??/TFORM??/TUNIT?? keywords
+ header.update(hdu_merged.header)
+ hdu_merged.header = header
+ # copy PrimaryHDU from first FITS
+ primary_hdu = file1[0].copy()
+ # make HDUList and return
+ return fits.HDUList([primary_hdu, hdu_merged])
+
+
+def del_key_startswith(header, startswith, lastlength=0):
+ """
+ Delete the keys which start with the specified strings.
+
+ Arguments:
+ header: FITS table header
+ startswith: a list of strings; If a key starts with any
+ of these strings, then the key-value pair is removed.
+
+ XXX: the deletion must be repeated several times until the
+ length of the header does not decrease any more.
+ (This may be a bug related to the astropy.io.fits???)
+ """
+ if not isinstance(startswith, list):
+        startswith = [startswith]
+ re_key = re.compile(r"^(%s)" % "|".join(startswith), re.I)
+ for k in header.keys():
+ if re_key.match(k):
+ del header[k]
+ curlength = len(header)
+ if lastlength == curlength:
+ return
+ else:
+ # recursively continue deletion
+ if DEBUG:
+ print("DEBUG: recursively continue header keywords deleteion",
+ file=sys.stderr)
+ del_key_startswith(header, startswith, curlength)
+
+
+def get_filename_blockname(pstr):
+ """
+    Separate the provided 'pstr' (parameter string) into filename and
+    blockname. If it does not contain a blockname, then the default
+    blockname is returned.
+ """
+ try:
+ filename, blockname = re.sub(r"[\[\]]", " ", pstr).split()
+ except ValueError:
+ filename = pstr
+ blockname = BLOCKNAME_DFT
+ return (filename, blockname)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Merge several FITS files with the common columns.")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("infile1", help="input FITS file 1; " + \
+ "The blockname can be appended, e.g., infile1.fits[EVENTS]")
+ parser.add_argument("infile2", nargs="+",
+ help="input FITS file 2 and more")
+ parser.add_argument("outfile", help="merged output file")
+ parser.add_argument("-c", "--columns", dest="columns",
+ help="list of columns to be merged (comma separated)")
+ parser.add_argument("-C", "--clobber", dest="clobber",
+ action="store_true", help="overwrite output file if exists")
+ args = parser.parse_args()
+ if DEBUG:
+ print("DEBUG: infile2: ", args.infile2, file=sys.stderr)
+
+    columns = (args.columns.upper().replace(",", " ").split()
+               if args.columns else None)
+ file1, block1 = get_filename_blockname(args.infile1)
+ merged_fits = fits.open(file1)
+ for fitsfile in args.infile2:
+ # split filename and block name
+ file2, block2 = get_filename_blockname(fitsfile)
+ merged_fits = merge2fits(merged_fits, file2, block1, block2, columns)
+ merged_fits.writeto(args.outfile, checksum=True, clobber=args.clobber)
+
+
+if __name__ == "__main__":
+ main()
+
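
The row-merging core of merge2fits() relies on the 'nrows' argument of
BinTableHDU.from_columns(), which allocates an enlarged table pre-filled
with the first table's rows; the second table's rows are then copied
into the tail. A minimal sketch with made-up data:

    import numpy as np
    from astropy.io import fits

    t1 = fits.BinTableHDU.from_columns([fits.Column(
        name="TIME", format="1D", array=np.arange(5, dtype=float))])
    t2 = fits.BinTableHDU.from_columns([fits.Column(
        name="TIME", format="1D", array=np.arange(3, dtype=float))])
    n1, n2 = len(t1.data), len(t2.data)
    merged = fits.BinTableHDU.from_columns(t1.columns, nrows=n1 + n2)
    # copy (and, if necessary, coerce) the second table's rows
    dtype = merged.columns["TIME"].array.dtype
    merged.columns["TIME"].array[n1:] = \
        t2.columns["TIME"].array.astype(dtype)
    print(len(merged.data))   # 8
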
diff --git a/astro/fitting/fit_betasbp_cut.py b/astro/fitting/fit_betasbp_cut.py
new file mode 100755
index 0000000..2d2931e
--- /dev/null
+++ b/astro/fitting/fit_betasbp_cut.py
@@ -0,0 +1,458 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Fit the given SBP data with the following beta model:
+#   s = s0 * pow((1.0+(r/rc)^2), 0.5-3*beta) + c
+# This tool supports the following two ways to cut the data before fitting:
+# (1) ignore the specified number of inner-most data points;
+# (2) ignore the data points whose radius is less than the given value.
+#
+# Aaron LI
+# 2015/05/29
+#
+# Changelogs:
+# v0.5.1, 2015/06/07, Aaron LI
+# * fixed 'dof' calculation with 'n-p-1'
+# * fixed 'par_eps' calculation/configuration
+# v0.5.0, 2015/06/07, Aaron LI
+# * added 'fit_model_bounds' using 'scipy.optimize.minimize' to
+# perform function minimization with bounds
+# * split the data cut section to function 'cut_data'
+# * added argument 'options' to 'fit_model_bounds'
+# v0.4.0, 2015/06/06, Aaron LI
+# * replace getopt with 'argparse'
+# * added 'get_parameter' to process model parameter initial value and bounds
+# * support read parameter bounds from input file
+#   * added options '--s0', '--rc', '--beta', '--const' to get parameter
+# initial values and bounds
+# * renamed 'fit_beta_model' to 'fit_model', and added argument 'func' to
+# support other models
+# v0.3.0, 2015/06/02, Aaron LI
+# * can output chi-squared and dof values
+# * can output one standard deviation errors on the parameters
+# v0.2.0, 2015/05/30:
+# * Added option '-n' / '--no-radius' to ignore radius less than the
+# given value.
+# * Support read model initial parameter values from input file.
+#
+# TODO:
+# * calculate fitting parameter's confidence interval / confidence bounds
+#   * to normalize fitting parameters to be the same order of magnitude
+# for better minimization
+# Ref: http://stackoverflow.com/questions/21369139/normalization-for-optimization-in-python
+#
+
+
+from __future__ import print_function, division
+
+__version__ = "0.5.1"
+__date__ = "2015/06/07"
+
+import numpy as np
+from scipy.optimize import curve_fit, minimize
+
+import os
+import sys
+import re
+import argparse
+
+
+# modes to cut data
+CUT_POINT = 'CUT_POINT'
+CUT_RADIUS = 'CUT_RADIUS'
+# default minimize method
+MINIMIZE_METHOD = 'L-BFGS-B'
+
+
+def beta_model(r, s0, rc, beta, c):
+ """
+ SBP beta model, with a constant background.
+ """
+ return s0 * np.power((1.0+(r/rc)**2), 0.5-3*beta) + c
+
+
+def calc_chisq(func, xdata, ydata, yerrdata, *args):
+ """
+ Calculate the chi-squared values for the given function according
+ to the provided data points.
+
+ Arguments:
+ xdata: x values of data points
+ ydata: y values of data points
+ yerrdata: y standard deviation values
+ args: additional arguments for 'func'
+
+ Return:
+ chi-squared value
+ """
+ xdata = np.array(xdata)
+ ydata = np.array(ydata)
+ yerrdata = np.array(yerrdata)
+ return np.sum(((ydata - func(xdata, *args)) / yerrdata) ** 2)
+
+
+def fit_model(func, xdata, ydata, yerrdata, p0):
+ """
+    Fit the provided data with the given model function.
+
+ Arguments:
+ p0: initial values for the parameters of beta model
+
+ Return:
+ (popt, infodict)
+ popt: optimal values for the parameters
+ infodict:
+ * fvec: the function evaluated at the output parameters
+ * dof: degree of freedom
+ * chisq: chi-squared
+ * perr: one standard deviation errors on the parameters
+ """
+ popt, pcov = curve_fit(func, xdata, ydata, p0=p0, sigma=yerrdata)
+ # the function evaluated at the output parameters
+ fvec = lambda x: func(x, *popt)
+ # degree of freedom
+ dof = len(xdata) - len(popt) - 1
+ # chi squared
+ chisq = np.sum(((ydata - fvec(xdata)) / yerrdata) ** 2)
+ # one standard deviation errors on the parameters
+ perr = np.sqrt(np.diag(pcov))
+ infodict = {
+ 'fvec': fvec,
+ 'dof': dof,
+ 'chisq': chisq,
+ 'perr': perr
+ }
+ return (popt, infodict)
+
+
+def fit_model_bounds(func, xdata, ydata, yerrdata, p0=None,
+ bounds=None, options=None):
+ """
+    Fit the provided data with the given model function, applying
+    parameter bounds.
+ Using 'scipy.optimize.minimize'.
+
+ Arguments:
+ p0: initial values for the parameters of beta model
+ bounds: (min, max) pairs for each parameter bound
+ options: a dict of solver options (=> minimize: options)
+
+ Return:
+ (popt, infodict)
+ popt: optimal values for the parameters
+ infodict:
+ * fvec: the function evaluated at the output parameters
+ * dof: degree of freedom
+ * chisq: chi-squared
+ * perr: one standard deviation errors on the parameters
+ """
+ # objective function to be minimized, required format of 'f(x, *args)'
+ f = lambda p: calc_chisq(func, xdata, ydata, yerrdata, *p)
+ # minimize the given function using 'scipy.optimize.minimize' with bounds
+ res = minimize(f, p0, method=MINIMIZE_METHOD, bounds=bounds,
+ options=options)
+ popt = res.x
+ #print("DEBUG: minimization results:\n", res, file=sys.stderr)
+
+ # check minimization results
+ if not res.success:
+ print("*** WARNING: minimization exited with error: ***\n" + \
+ "*** %s ***" % res.message, file=sys.stderr)
+
+ # the function evaluated at the output parameters
+ fvec = lambda x: func(x, *popt)
+ # degree of freedom
+ dof = len(xdata) - len(popt) - 1
+ # chi squared
+ chisq = res.fun
+ # one standard deviation errors on the parameters
+    perr = popt * 0.0  # FIXME: 'minimize' does not estimate parameter errors
+ infodict = {
+ 'fvec': fvec,
+ 'dof': dof,
+ 'chisq': chisq,
+ 'perr': perr
+ }
+ return (popt, infodict)
+
+
+def cut_data(xdata, ydata, yerrdata, cutmode=CUT_POINT, cutvalue=0):
+ """
+ Cut the given data with the provided cutmode and cutvalue,
+ return the cut data.
+
+ Arguments:
+ xdata, ydata, yerrdata: input data (x, y, yerr)
+ cutmode: 'point' / 'radius'; ignore data by number of data points,
+ or by radius value less than the given value
+ cutvalue: the cut limit
+
+ Return:
+ (xdata_cut, ydata_cut, yerrdata_cut)
+ """
+ if cutmode == CUT_POINT:
+ xdata_cut = xdata[cutvalue:]
+ ydata_cut = ydata[cutvalue:]
+ yerrdata_cut = yerrdata[cutvalue:]
+ elif cutmode == CUT_RADIUS:
+ ii = xdata >= cutvalue
+ xdata_cut = xdata[ii]
+ ydata_cut = ydata[ii]
+ yerrdata_cut = yerrdata[ii]
+ else:
+ raise ValueError('Unknown cut mode: %s' % cutmode)
+ return (xdata_cut, ydata_cut, yerrdata_cut)
+
+
+def get_parameter(pstring):
+ """
+ Process the parameter string of the following format, and return
+ the initial value, lower limit, and upper limit.
+ "init_value"
+ "init_value lower upper"
+ "init_value,lower,upper"
+ If want to ignore the lower/upper limit, use 'None' (case-insensitive),
+ and the None is returned.
+ """
+ parameters = pstring.replace(',', ' ').split()
+ if len(parameters) == 1:
+ init_value = float(parameters[0])
+ return (init_value, None, None)
+ elif len(parameters) == 3:
+ init_value = float(parameters[0])
+ if parameters[1].upper() == 'NONE':
+ lower_value = None
+ else:
+ lower_value = float(parameters[1])
+ if parameters[2].upper() == 'NONE':
+ upper_value = None
+ else:
+ upper_value = float(parameters[2])
+ return (init_value, lower_value, upper_value)
+ else:
+ raise ValueError('Invalid parameter format: %s' % pstring)
+
+
+def main():
+ # options
+ infile = None
+    outfilename = None
+ cutmode = CUT_POINT # ignore data by number of data points
+ cutvalue = 0 # do not ignore any data by default
+ # initial values for the four parameters of the beta model
+ s0_0 = 1.0e-7
+ rc_0 = 10.0
+ beta_0 = 0.6
+ c_0 = 0.0
+ # default bounds for the four parameters
+ s0_lower, s0_upper = None, None
+ rc_lower, rc_upper = None, None
+ beta_lower, beta_upper = None, None
+ c_lower, c_upper = None, None
+
+ # parser for command line options and arguments
+ parser = argparse.ArgumentParser(
+ description="Fitting provided data with the beta model.",
+ epilog="Version: %s (%s)" % (__version__, __date__))
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", default=False,
+ help="show verbose/debug information (False)")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("-i", "--infile",
+ dest="infile", required=True,
+ help="""input data file with the following 4 or 3 columns:
+ [radius radius_err brightness brightness_err],
+ [radius brightness brightness_err].
+ Note: The initial values and lower/upper limits
+ for the beta models can also be provided with the
+ following syntax:
+ # s0_0 = init_value, lower_limit, upper_limit
+ # rc_0 = init_value, lower_limit, upper_limit
+ # beta_0 = init_value, lower_limit, upper_limit
+ # c_0 = init_value, lower_limit, upper_limit""")
+ parser.add_argument("-o", "--outfile", dest="outfilename",
+ help="output file to store the fitted data")
+ parser.add_argument("-b", "--bounds", dest="bounds",
+ action="store_true", default=False,
+ help="whether apply paramenter bounds (False)")
+ parser.add_argument("-c", "--cut-point",
+ dest="cut_point", metavar="N", type=int, default=0,
+ help="number of inner-most data points to be ignored")
+ parser.add_argument("-n", "--no-radius",
+ dest="cut_radius", metavar="RADIUS", type=float, default=0.0,
+ help="ignore data points with smaller radius")
+ parser.add_argument("--s0", dest="s0",
+ metavar="init_value,lower,upper",
+ help="initial value and lower/upper limits for parameter s0. " + \
+ "Use 'none' (case-insensitive) to ignore the limit")
+ parser.add_argument("--rc", dest="rc",
+ metavar="init_value,lower,upper",
+ help="initial value and lower/upper limits for parameter rc. " + \
+ "Use 'none' (case-insensitive) to ignore the limit")
+ parser.add_argument("--beta", dest="beta",
+ metavar="init_value,lower,upper",
+ help="initial value and lower/upper limits for parameter beta. " + \
+ "Use 'none' (case-insensitive) to ignore the limit")
+ parser.add_argument("--const", dest="const",
+ metavar="init_value,lower,upper",
+ help="initial value and lower/upper limits for parameter const. " + \
+ "Use 'none' (case-insensitive) to ignore the limit")
+
+ args = parser.parse_args()
+ if args.outfilename:
+ outfile = open(args.outfilename, 'w')
+ else:
+ outfile = sys.stdout
+ # cut mode and value
+ if args.cut_point:
+ cutmode = CUT_POINT
+ cutvalue = args.cut_point
+ elif args.cut_radius:
+ cutmode = CUT_RADIUS
+ cutvalue = args.cut_radius
+
+ if args.verbose:
+ print('DEBUG: apply parameter bounds: %s' % args.bounds,
+ file=sys.stderr)
+ print("DEBUG: cutmode: %s, cutvalue: %s" % (cutmode, cutvalue))
+
+ # input data list
+ r_data = []
+ rerr_data = []
+ s_data = []
+ serr_data = []
+
+ # regex to match initial parameter names, blank line, and comment line
+ re_blank = re.compile(r'^\s*$')
+ re_comment = re.compile(r'^\s*#')
+ re_s0 = re.compile(r'^\s*#\s*s0_0\s*[:=]')
+ re_rc = re.compile(r'^\s*#\s*rc_0\s*[:=]')
+ re_beta = re.compile(r'^\s*#\s*beta_0\s*[:=]')
+ re_c = re.compile(r'^\s*#\s*c_0\s*[:=]')
+ for line in open(args.infile, 'r'):
+ if re_s0.match(line):
+ # read 's0_0': initial value for parameter 's0'
+ s0_pstring = re_s0.split(line)[1]
+ s0_0, s0_lower, s0_upper = get_parameter(s0_pstring)
+ elif re_rc.match(line):
+ # read 'rc_0': initial value for parameter 'rc'
+ rc_pstring = re_rc.split(line)[1]
+ rc_0, rc_lower, rc_upper = get_parameter(rc_pstring)
+ elif re_beta.match(line):
+ # read 'beta_0': initial value for parameter 'beta'
+ beta_pstring = re_beta.split(line)[1]
+ beta_0, beta_lower, beta_upper = get_parameter(beta_pstring)
+ elif re_c.match(line):
+ # read 'c_0': initial value for parameter 'c'
+ c_pstring = re_c.split(line)[1]
+ c_0, c_lower, c_upper = get_parameter(c_pstring)
+ elif re_blank.match(line):
+ # ignore blank line
+ continue
+ elif re_comment.match(line):
+ # ignore comment line
+ continue
+ else:
+ try:
+ r, rerr, s, serr = map(float, line.split())
+ except ValueError:
+ try:
+ r, s, serr = map(float, line.split())
+ rerr = 0.0
+ except ValueError:
+ print('ERROR: unsupported input data format',
+ file=sys.stderr)
+ sys.exit(21)
+ r_data.append(r)
+ rerr_data.append(rerr)
+ s_data.append(s)
+ serr_data.append(serr)
+
+ if args.verbose:
+ print('DEBUG: infile: s0_0 = %g (%s, %s)' % \
+ (s0_0, s0_lower, s0_upper), file=sys.stderr)
+ print('DEBUG: infile: rc_0 = %g (%s, %s)' % \
+ (rc_0, rc_lower, rc_upper), file=sys.stderr)
+ print('DEBUG: infile: beta_0 = %g (%s, %s)' % \
+ (beta_0, beta_lower, beta_upper), file=sys.stderr)
+ print('DEBUG: infile: c_0 = %g (%s, %s)' % \
+ (c_0, c_lower, c_upper), file=sys.stderr)
+
+ # get parameter initial values and bounds from command line arguments
+ if args.s0:
+ s0_0, s0_lower, s0_upper = get_parameter(args.s0)
+ if args.rc:
+ rc_0, rc_lower, rc_upper = get_parameter(args.rc)
+ if args.beta:
+ beta_0, beta_lower, beta_upper = get_parameter(args.beta)
+ if args.const:
+ c_0, c_lower, c_upper = get_parameter(args.const)
+
+ if args.verbose:
+ print('DEBUG: final: s0_0 = %g (%s, %s)' % \
+ (s0_0, s0_lower, s0_upper), file=sys.stderr)
+ print('DEBUG: final: rc_0 = %g (%s, %s)' % \
+ (rc_0, rc_lower, rc_upper), file=sys.stderr)
+ print('DEBUG: final: beta_0 = %g (%s, %s)' % \
+ (beta_0, beta_lower, beta_upper), file=sys.stderr)
+ print('DEBUG: final: c_0 = %g (%s, %s)' % \
+ (c_0, c_lower, c_upper), file=sys.stderr)
+
+ # convert to numpy array
+ r_data = np.array(r_data)
+ rerr_data = np.array(rerr_data)
+ s_data = np.array(s_data)
+ serr_data = np.array(serr_data)
+ # cut data
+ r_data_cut, s_data_cut, serr_data_cut = cut_data(r_data, s_data,
+ serr_data, cutmode=cutmode, cutvalue=cutvalue)
+
+ # model parameters
+ par_names = ["s0", "rc", "beta", "c"]
+ # initial values
+ par_0 = np.array([s0_0, rc_0, beta_0, c_0])
+ # parameter bounds
+ par_bounds = np.array([(s0_lower, s0_upper), (rc_lower, rc_upper),
+ (beta_lower, beta_upper), (c_lower, c_upper)])
+ # set eps for the parameters (required for the minimize method,
+ # otherwise error 'ABNORMAL_TERMINATION_IN_LNSRCH' occurs, which
+ # may due to the different order of magnitude of each parameters)
+ par_eps = np.absolute(par_0) * 1e-3
+ par_eps[ par_eps<1e-15 ] = 1e-15 # set par_eps >= 1e-14
+ if args.verbose:
+ print("DEBUG: parameters eps:\n", par_eps, file=sys.stderr)
+
+ if args.bounds:
+ ## 'fit_model_bounds' to perform fitting with bounds
+ par_fit, infodict = fit_model_bounds(beta_model, r_data_cut,
+ s_data_cut, serr_data_cut, p0=par_0, bounds=par_bounds,
+ options={'eps': par_eps})
+ else:
+ # 'fit_model' do not support parameter bounds
+ par_fit, infodict = fit_model(beta_model, r_data_cut,
+ s_data_cut, serr_data_cut, p0=par_0)
+
+ fvec = infodict['fvec']
+ dof = infodict['dof']
+ chisq = infodict['chisq']
+ perr = infodict['perr']
+
+ print("# beta-model fitting results:", file=outfile)
+ print("# s(r) = s0 * pow((1.0+(r/rc)^2), 0.5-3*beta) + c", file=outfile)
+ for i in range(len(par_names)):
+ print("# %s = %g +/- %g" % (par_names[i], par_fit[i], perr[i]),
+ file=outfile)
+ print("# chisq / dof = %g / %g = %g" % (chisq, dof, chisq/dof),
+ file=outfile)
+ print("# radius(input) brightness(fitted)", file=outfile)
+ for i in range(len(r_data)):
+ print("%g %g" % (r_data[i], fvec(r_data[i])), file=outfile)
+
+ if args.outfilename:
+ outfile.close()
+
+
+if __name__ == '__main__':
+ main()
+
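
An end-to-end check of the unbounded fitting path on synthetic data
(all numbers below are made up for illustration):

    import numpy as np
    from scipy.optimize import curve_fit

    def beta_model(r, s0, rc, beta, c):
        return s0 * np.power(1.0 + (r/rc)**2, 0.5 - 3*beta) + c

    np.random.seed(42)
    r = np.linspace(2.0, 300.0, 60)
    serr = 0.05 * beta_model(r, 1e-7, 20.0, 0.7, 1e-9)
    s = beta_model(r, 1e-7, 20.0, 0.7, 1e-9) + serr * np.random.randn(r.size)
    popt, pcov = curve_fit(beta_model, r, s, sigma=serr,
                           p0=[1e-7, 10.0, 0.6, 0.0])
    print(popt)                    # should recover ~[1e-7, 20, 0.7, 1e-9]
    print(np.sqrt(np.diag(pcov)))  # one-sigma parameter errors
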
diff --git a/astro/lc_clean.py b/astro/lc_clean.py
new file mode 100755
index 0000000..7141002
--- /dev/null
+++ b/astro/lc_clean.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# Created: 2016-01-16
+# Updated: 2016-01-16
+#
+
+"""
+Clean the lightcurve by iteratively sigma-clipping the RATE data,
+discarding the time bins with RATE beyond [mean-n*sigma, mean+n*sigma].
+"""
+
+__version__ = "0.1.0"
+__date__ = "2016-01-16"
+
+import sys
+import argparse
+
+from astropy.io import fits
+import numpy as np
+
+
+class LightCurve:
+ """
+ X-ray data light curve class
+ """
+ def __init__(self, lcfile):
+ f = fits.open(lcfile)
+ self.lc_data = f[1].data
+ self.lc_header = f[1].header
+ self.time = self.lc_data['TIME']
+ self.rate = self.lc_data['RATE']
+ self.rate_err = self.lc_data['ERROR']
+ self.TSTART = self.lc_header['TSTART']
+ self.TSTOP = self.lc_header['TSTOP']
+ self.TIMEDEL = self.lc_header['TIMEDEL']
+ self.TIMEPIXR = self.lc_header['TIMEPIXR']
+ f.close()
+
+ def sigma_clip(self, nsigma=3, maxiter=10):
+ """
+        Iteratively clip the time bins whose values lie beyond the
+ range [mean-n*sigma, mean+n*sigma].
+ """
+ rate = self.rate
+ keep_idx = np.ones(rate.shape, dtype=bool) # all True's
+ keep_num = np.sum(keep_idx)
+ keep_num0 = np.inf
+ i = 0
+ while (keep_num < keep_num0):
+ if (i >= maxiter):
+ print("WARNING: maximum iteration limit reached",
+ file=sys.stderr)
+ break
+ keep_num0 = keep_num
+ i += 1
+ mean = np.mean(rate[keep_idx])
+ sigma = np.std(rate[keep_idx])
+ cut_low = mean - nsigma * sigma
+ cut_high = mean + nsigma * sigma
+ keep_idx = np.logical_and((rate >= cut_low), (rate <= cut_high))
+ keep_num = np.sum(keep_idx)
+ # save clip results
+ self.niter = i
+ self.keep_idx = keep_idx
+ self.time_clipped = self.time[keep_idx]
+ self.rate_clipped = self.rate[keep_idx]
+
+ def make_gti(self, apply_header=True):
+ """
+ Make new GTIs (good time intervals) according to the clipped
+ time bins.
+ """
+        frac = 0.01  # TIMEDEL fraction to distinguish two time bins
+ gti_start = []
+ gti_stop = []
+ time_start = self.time_clipped
+ time_stop = time_start + self.TIMEDEL
+ # first GTI start time
+ gti_start.append(time_start[0])
+ for tstart, tstop in zip(time_start[1:], time_stop[:-1]):
+ if (np.abs(tstart-tstop) <= frac * self.TIMEDEL):
+ # time bin continues
+ continue
+ else:
+ # a new GTI start
+ gti_start.append(tstart)
+ gti_stop.append(tstop)
+ # last GTI stop time
+ gti_stop.append(time_stop[-1])
+ # convert to numpy array
+ gti_start = np.array(gti_start)
+ gti_stop = np.array(gti_stop)
+ if apply_header:
+ # add TSTART to the time
+ gti_start += self.TSTART
+ gti_stop += self.TSTART
+ # save results
+ self.gti_start = gti_start
+ self.gti_stop = gti_stop
+
+ def write_gti(self, filename=None, header=True):
+ """
+ Write generated GTIs to file or screen (default)
+ """
+ if isinstance(filename, str):
+ outfile = open(filename, 'w')
+ else:
+ outfile = sys.stdout
+ #
+ if header:
+ outfile.write('# TSTART\tTSTOP\n')
+ outfile.write('\n'.join([ '%s\t%s' % (tstart, tstop) \
+ for tstart, tstop in zip(self.gti_start, self.gti_stop) ]))
+ #
+ if isinstance(filename, str):
+ outfile.close()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Clean light curve by sigma clipping")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("infile",
+ help="input lightcurve file; contains [TIME, RATE] columns")
+ parser.add_argument("outfile", nargs='?', default=None,
+ help="output text-format GTI file; for XSELECT filter time")
+ parser.add_argument("-s", "--nsigma", dest="nsigma", type=float,
+        default=2.0, help="sigma clipping significance level")
+ parser.add_argument("-H", "--no-header", dest="noheader",
+ action="store_true", help="not write header to the output file")
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", help="show verbose information")
+ args = parser.parse_args()
+
+ lc = LightCurve(args.infile)
+ lc.sigma_clip(nsigma=args.nsigma)
+ lc.make_gti(apply_header=True)
+ lc.write_gti(filename=args.outfile, header=(not args.noheader))
+ if args.verbose:
+ exposure = np.sum(lc.gti_stop - lc.gti_start)
+ print("# Total GTI: %.2f (s)" % exposure)
+
+
+if __name__ == "__main__":
+ main()
+
+
+# vim: set ts=4 sw=4 tw=0 fenc=utf-8 ft=python: #
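
The iterative clipping in LightCurve.sigma_clip() can be reproduced
standalone; a sketch on simulated rates (the flare injection is
arbitrary):

    import numpy as np

    np.random.seed(0)
    rate = np.random.normal(10.0, 1.0, 1000)
    rate[::50] += 20.0                    # inject flare-like bins
    keep = np.ones(rate.shape, dtype=bool)
    nkeep, nkeep0 = keep.sum(), np.inf
    while nkeep < nkeep0:
        nkeep0 = nkeep
        mean, sigma = rate[keep].mean(), rate[keep].std()
        keep = np.abs(rate - mean) <= 2.0 * sigma
        nkeep = keep.sum()
    print(keep.sum())   # bins kept; the injected flares are rejected
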
diff --git a/astro/marx/marx_pntsrc.py b/astro/marx/marx_pntsrc.py
new file mode 100755
index 0000000..97b1575
--- /dev/null
+++ b/astro/marx/marx_pntsrc.py
@@ -0,0 +1,121 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/06/16
+
+"""
+Run MARX simulation on a given list of point sources, merge the
+output simulation results, and finally convert into FITS image.
+"""
+
+__version__ = "0.1.0"
+__date__ = "2015/06/16"
+
+import sys
+import argparse
+import subprocess
+import re
+import os
+
+
+def marx_pntsrc(pfile, ra, dec, flux, outdir):
+ """
+ Run MARX simulation for the provided point source.
+ """
+ cmd = "marx @@%(pfile)s SourceRA=%(ra)s " % {"pfile": pfile, "ra": ra} + \
+ "SourceDEC=%(dec)s SourceFlux=%(flux)s OutputDir=%(outdir)s" % \
+ {"dec": dec, "flux": flux, "outdir": outdir}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+
+
+def marxcat(indirs, outdir):
+ """
+ Concatenate a list of MARX simulation results.
+
+    Note: the number of MARX results concatenated at *one* time cannot
+    be too large, otherwise the 'marxcat' tool will fail.
+ """
+ if isinstance(indirs, list):
+ pass
+ elif isinstance(indirs, str):
+ indirs = indirs.split()
+ else:
+ raise ValueError("invalid indirs type: %s" % indirs)
+ pid = os.getpid()
+ tempdir = "_marx_tmp%d" % pid
+ cmd = "cp -a %(marxdir)s %(tempdir)s" % \
+ {"marxdir": indirs[0], "tempdir": tempdir}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+ del indirs[0]
+ while len(indirs) > 0:
+        # concatenate 10 directories each time (tempdir + 9 from the list)
+ catdirs = indirs[:9]
+ del indirs[:9]
+ catdirs = tempdir + " " + " ".join(catdirs)
+ # concatenate MARX results
+ cmd = "marxcat %(catdirs)s %(outdir)s" % \
+ {"catdirs": catdirs, "outdir": outdir}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+ # move output results to temporary directory
+ cmd = "rm -rf %(tempdir)s && mv %(outdir)s %(tempdir)s" % \
+ {"tempdir": tempdir, "outdir": outdir}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+ cmd = "mv %(tempdir)s %(outdir)s" % \
+ {"tempdir": tempdir, "outdir": outdir}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+
+
+def marx2fits(indir, outfile, params=""):
+ """
+ Convert the results of MARX simulation into FITS image.
+ """
+ cmd = "marx2fits %(params)s %(indir)s %(outfile)s" % \
+ {"params": params, "indir": indir, "outfile": outfile}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Run MARX on a given list of point sources")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("pfile", help="marx paramter file")
+ parser.add_argument("srclist", help="point source list file")
+ parser.add_argument("outprefix", help="prefix of output directories")
+ args = parser.parse_args()
+
+ outdirs = []
+ i = 0
+ for line in open(args.srclist, "r"):
+ if re.match(r"^\s*$", line):
+ # skip blank line
+ continue
+ elif re.match(r"^\s*#", line):
+ # skip comment line
+ continue
+ i += 1
+ ra, dec, flux = map(float, line.split())
+ print("INFO: ra = %g, dec = %g, flux = %g" % (ra, dec, flux),
+ file=sys.stderr)
+ outdir = "%sp%03d" % (args.outprefix, i)
+ print("INFO: outdir = %s" % outdir, file=sys.stderr)
+ outdirs.append(outdir)
+ marx_pntsrc(args.pfile, ra, dec, flux, outdir)
+ # merge results
+ merged = args.outprefix + "merged"
+ marxcat(outdirs, merged)
+ # convert to FITS image
+ merged_fits = merged + ".fits"
+ marx2fits(merged, merged_fits)
+
+
+if __name__ == "__main__":
+ main()
+
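
The expected 'srclist' format is one source per line giving
"ra dec flux"; blank lines and '#' comments are skipped. For example
(values are made up):

    # RA[deg]   Dec[deg]   Flux
    30.1234     -5.6789    1.2e-13
    30.2001     -5.7012    4.5e-14
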
diff --git a/astro/marx/randpoints.py b/astro/marx/randpoints.py
new file mode 100755
index 0000000..da0bedc
--- /dev/null
+++ b/astro/marx/randpoints.py
@@ -0,0 +1,327 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# Created: 2015-12-03
+# Updated: 2015-12-10
+#
+# ChangeLog:
+# 2015-12-05:
+# * Add class "RegionDS9" to parse region
+# 2015-12-10:
+# * Support both "erg/cm^2/s" and "photon/cm^2/s" flux units
+# * Add argument "--outregion" to also save a region file
+#
+
+"""
+Generate random point source information for MARX simulations, and make
+a point source data list for use with "marx_pntsrc.py".
+"""
+
+__version__ = "0.3.1"
+__date__ = "2015-12-10"
+
+
+import sys
+import argparse
+import re
+import numpy as np
+
+
+class RegionDS9:
+ """
+ Process DS9 regions.
+ """
+ def __init__(self, shape=None, xc=None, yc=None,
+ width=None, height=None, rotation=None):
+ self.shape = shape
+ self.xc = xc
+ self.yc = yc
+ self.width = width
+ self.height = height
+ self.rotation = rotation
+
+ def parse(self, region):
+ """
+ Parse a DS9 region string and update the instance.
+
+ Region syntax:
+ box(xc,yc,width,height,rotation)
+
+ Note:
+ "width", "height" may have a '"' suffix which means "arcsec" instead
+ of "degree".
+ """
+ re_box = re.compile(r'^\s*(?P<shape>box)\(\s*(?P<xc>[\d.-]+)\s*,\s*(?P<yc>[\d.-]+)\s*,\s*(?P<width>[\d.-]+"?)\s*,\s*(?P<height>[\d.-]+"?)\s*,\s*(?P<rotation>[\d.-]+)\s*\).*$', re.I)
+ m_box = re_box.match(region)
+ if m_box is not None:
+ self.shape = "box"
+ self.xc = float(m_box.group("xc"))
+ self.yc = float(m_box.group("yc"))
+ self.width = self.parse_dms(m_box.group("width"))
+ self.height = self.parse_dms(m_box.group("height"))
+ self.rotation = float(m_box.group("rotation"))
+ else:
+ raise NotImplementedError("Only 'box' region supported")
+
+ @staticmethod
+ def parse_dms(ms):
+ """
+ Parse a value in format ?'?" into degree.
+ """
+ re_arcmin = re.compile(r'^\s*(?P<arcmin>[\d.]+)\'.*')
+ re_arcsec = re.compile(r'^([^\']*\'|)\s*(?P<arcsec>[\d.]+)".*')
+ m_arcmin = re_arcmin.match(ms)
+ m_arcsec = re_arcsec.match(ms)
+ degree = 0.0
+ if m_arcmin is not None:
+ degree += float(m_arcmin.group("arcmin")) / 60.0
+ if m_arcsec is not None:
+ degree += float(m_arcsec.group("arcsec")) / 3600.0
+ return degree
+
+
+class RandCoord:
+ """
+ Randomly generate the coordinates of point sources within a given box
+ region for MARX simulation.
+
+ Arguments:
+ xc - central X position of the box (degree)
+ yc - central Y position of the box (degree)
+ width - width of the box (degree)
+ height - height of the box (degree)
+ mindist - minimum distance between each generated coordinate (degree)
+ """
+
+ def __init__(self, xc, yc, width, height, mindist=0):
+ self.xc = xc
+ self.yc = yc
+ self.width = width
+ self.height = height
+ self.mindist = mindist
+ # Record the generated coordinates: [(x1,y1), (x2,y2), ...]
+ self.xy = []
+
+ def clear(self):
+ """
+ Clear previously generated coordinates.
+ """
+ self.xy = []
+
+ def generate(self, n=1):
+ """
+ Generate random coordinates.
+ """
+ coord = []
+ xmin = self.xc - 0.5 * self.width
+ xmax = self.xc + 0.5 * self.width
+ ymin = self.yc - 0.5 * self.height
+ ymax = self.yc + 0.5 * self.height
+ i = 0
+ while i < n:
+ x = np.random.uniform(low=xmin, high=xmax)
+ y = np.random.uniform(low=ymin, high=ymax)
+ if self.checkDistance((x, y)):
+ i += 1
+ coord.append((x, y))
+ self.xy.append((x, y))
+ return coord
+
+ def checkDistance(self, coord):
+ """
+        Check whether the given coordinate is farther than the specified
+        "mindist" from every previously generated coordinate.
+        """
+ if len(self.xy) == 0:
+ return True
+ else:
+ xy = np.array(self.xy) # each row represents one coordinate
+ dist2 = (xy[:, 0] - coord[0])**2 + (xy[:, 1] - coord[1])**2
+ if all(dist2 >= self.mindist**2):
+ return True
+ else:
+ return False
+
+
+class RandFlux:
+ """
+ Randomly generate the flux of point sources for MARX simulation.
+
+ Arguments:
+ fmin - minimum flux
+ fmax - maximum flux
+ """
+
+ def __init__(self, fmin, fmax):
+ self.fmin = fmin
+ self.fmax = fmax
+
+ @staticmethod
+ def fluxDensity(S):
+ """
+ The *differential* number count - flux function: dN(>S)/dS
+ i.e., density function
+
+ Broken power law:
+ dN/dS = (1) K (S/S_ref)^(-gamma_1); (S < S_b)
+ (2) K (S_b/S_ref)^(gamma_2-gamma_1) (S/S_ref)^(-gamma_2); (S >= S_b)
+ K: normalization constant
+ S_ref: normalization flux; [10^-15 erg/cm^2/s]
+ gamma_1: faint power-law index
+ gamma_2: bright power-law index
+ S_b: break flux; [10^-15 erg/cm^2/s]
+
+ Reference:
+ [1] Kim et al. 2007, ApJ, 659, 29
+ http://adsabs.harvard.edu/abs/2007ApJ...659...29K
+ http://hea-www.cfa.harvard.edu/CHAMP/PUBLICATIONS/ChaMP_ncounts.pdf
+ Table 4: ChaMP: 9.6 [deg^2]: 0.5-8 [keV]: 1.4 (photon index)
+ Differential number count; broken power law
+ K (normalization constant): 1557 (+28 / -50)
+ S_ref (normalization flux): 1.0 [10^-15 erg/cm^2/s]
+ gamma_1 (faint power-law index): 1.64 (+/- 0.01)
+ gamma_2 (bright power-law index): 2.48 (+/- 0.05)
+ S_b (break flux): 22.9 (+/- 1.6) [10^-15 erg/cm^2/s]
+ f_min (faint flux limit): 0.69 [10^-15 erg/cm^2/s]
+ f_max (bright flux limit): 6767.74 [10^-15 erg/cm^2/s]
+ """
+ K = 1557 # normalization constant: 1557 (+28 / -50)
+ S_ref = 1.0 # normalization flux: 1.0 [10^-15 erg/cm^2/s]
+ gamma_1 = 1.64 # faint power-law index: 1.64 (+/- 0.01)
+ gamma_2 = 2.48 # bright power-law index: 2.48 (+/- 0.05)
+ S_b = 22.9 # break flux: 22.9 (+/- 1.6) [10^-15 erg/cm^2/s]
+ # Adjust unit/magnitude
+ S = S / 1e-15 # => unit: 10^-15 erg/cm^2/s
+ if isinstance(S, np.ndarray):
+ Np = np.zeros(S.shape)
+ Np[S<=0] = 0.0
+ Np[S<=S_b] = K * (S[S<=S_b] / S_ref)**(-gamma_1)
+ Np[S>S_b] = K * (S_b/S_ref)**(gamma_2-gamma_1) * (S[S>S_b] / S_ref)**(-gamma_2)
+ else:
+ # "S" is a single number
+ if S <= 0.0:
+ Np = 0.0
+ elif S <= S_b:
+ Np = K * (S/S_ref)**(-gamma_1)
+ else:
+ Np = K * (S_b/S_ref)**(gamma_2-gamma_1) * (S/S_ref)**(-gamma_2)
+ #
+ return Np
+
+ def generate(self, n=1):
+ """
+        Generate a sample of flux values within [fmin, fmax] from
+        the above flux distribution.
+ """
+ results = []
+ # Get the maximum value of the flux number density function,
+        # which is monotonically decreasing.
+ M = self.fluxDensity(self.fmin)
+ for i in range(n):
+ while True:
+ u = np.random.uniform() * M
+ y = 10 ** np.random.uniform(low=np.log10(self.fmin),
+ high=np.log10(self.fmax))
+ if u <= self.fluxDensity(y):
+ results.append(y)
+ break
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Randomly generate point sources information for MARX")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "v%s (%s)" % (__version__, __date__))
+ parser.add_argument("-n", "--number", dest="number", type=int, default=1,
+ help="number of point sources (default: 1)")
+ parser.add_argument("-m", "--fmin", dest="fmin",
+ type=float, default=1e-15,
+ help="minimum flux (default: 1e-15 erg/cm^2/s)")
+ parser.add_argument("-M", "--fmax", dest="fmax",
+ type=float, default=6000e-15,
+ help="maximum flux (default: 6000e-15 erg/cm^2/s)")
+ parser.add_argument("-r", "--region", dest="region", required=True,
+ help="region within which to generate coordinates ('box' only)")
+ parser.add_argument("-d", "--distance", dest="distance", default="0",
+ help="minimum distance between coordinates (default: 0) [unit: deg/arcmin]")
+ parser.add_argument("-u", "--unit", dest="unit", default="erg",
+ help="unit for input and output flux; 'erg' (default) / 'photon'")
+ parser.add_argument("-f", "--factor", dest="factor", type=float,
+ help="conversion factor from 'photon/cm^s/s' to 'erg/cm^2/s' (required if unit='photon')")
+ parser.add_argument("-o", "--outfile", dest="outfile",
+ help="output file to save the generate information list")
+ parser.add_argument("-O", "--outregion", dest="outregion",
+ help="write the generate information list as a DS9 region file")
+
+ args = parser.parse_args()
+
+ # Check flux unit
+ if args.unit == "erg":
+ unit = "erg/cm^2/s"
+ fmin = args.fmin
+ fmax = args.fmax
+ factor = 1.0
+ elif args.unit == "photon":
+ unit = "photon/cm^2/s"
+ factor = args.factor
+        # argparse leaves 'factor' as None if '--factor' is not given
+        if factor is None:
+            raise ValueError("argument '--factor' required")
+        fmin = args.fmin / factor
+        fmax = args.fmax / factor
+ else:
+ raise ValueError("unsupported flux unit")
+
+ region = RegionDS9()
+ region.parse(args.region)
+ # Check the box rotation
+ if not (abs(region.rotation) <= 1.0 or abs(region.rotation-360) <= 1.0):
+ raise NotImplementedError("rotated 'box' region not supported")
+
+ # Minimum distance between generated coordinates
+ try:
+ mindist = float(args.distance)
+ except ValueError:
+ mindist = region.parse_dms(args.distance)
+
+ randcoord = RandCoord(region.xc, region.yc, region.width, region.height,
+ mindist=mindist)
+ randflux = RandFlux(fmin, fmax)
+ coord = randcoord.generate(n=args.number)
+ flux = randflux.generate(n=args.number)
+
+ if args.outfile:
+ outfile = open(args.outfile, "w")
+ else:
+ outfile = sys.stdout
+
+ print("# region: %s" % args.region, file=outfile)
+ print("# mindist: %.9f [deg]" % mindist, file=outfile)
+ print("# f_min: %.9g; f_max: %.9g [%s]" % (fmin, fmax, unit), file=outfile)
+ print("# factor: %g [photon/cm^2/s] / [erg/cm^2/s]" % factor, file=outfile)
+ print("# R.A.[deg] Dec.[deg] Flux[%s]" % unit, file=outfile)
+ for ((ra, dec), f) in zip(coord, flux):
+ print("%.9f %.9f %.9g" % (ra, dec, f*factor), file=outfile)
+
+ if args.outfile:
+ outfile.close()
+
+ # Save the generated information as a DS9 region file if specified
+ if args.outregion:
+ reg_r = '3"'
+ reg_header = ["# Region file format: DS9 version 4.1", "fk5"]
+ regions = [
+ "circle(%.9f,%.9f,%s) # text={%.9g}" % \
+ (ra, dec, reg_r, f*factor) \
+ for ((ra, dec), f) in zip(coord, flux)
+ ]
+ regfile = open(args.outregion, "w")
+ regfile.write("\n".join(reg_header + regions))
+ regfile.close()
+
+
+if __name__ == "__main__":
+ main()
+
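
A minimal driving sketch of the classes above (assuming 'randpoints.py'
is importable; the box region, separation, and source count are
arbitrary):

    from randpoints import RegionDS9, RandCoord, RandFlux

    region = RegionDS9()
    region.parse('box(150.5,2.2,600",480",0)')   # width/height in arcsec
    randcoord = RandCoord(region.xc, region.yc, region.width,
                          region.height, mindist=5.0/3600)  # >= 5 arcsec
    randflux = RandFlux(1e-15, 6000e-15)
    coord = randcoord.generate(n=10)
    flux = randflux.generate(n=10)
    # most fluxes fall near the faint limit, since dN/dS ~ S^-1.64 there
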
diff --git a/astro/query_ned.py b/astro/query_ned.py
new file mode 100755
index 0000000..959169f
--- /dev/null
+++ b/astro/query_ned.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# References:
+# [1] astroquery: NedClass
+# https://astroquery.readthedocs.org/en/latest/api/astroquery.ned.NedClass.html
+#
+# Change log:
+# 2016-05-25:
+# * Also output RA, DEC results
+# * Update argument process
+# * Simplify queried results process
+# * Improve comments a bit
+# * Some PEP8 fixes
+#
+# TODO:
+# * allow to query by coordinates & radius range
+# * filter queried results according to the type/other...
+# * if not queried by name, then try query by coordinates
+#
+
+"""
+Query NED with the provided name or coordinate.
+NASA/IPAC Extragalactic Database: http://ned.ipac.caltech.edu/
+"""
+
+import sys
+import argparse
+import csv
+from collections import OrderedDict
+
+from astroquery.ned import Ned
+from astroquery.exceptions import RemoteServiceError
+# from astropy import coordinates
+# import astropy.units as u
+
+
+__version__ = "0.2.2"
+__date__ = "2016-05-25"
+
+
+# Ned configurations
+Ned.TIMEOUT = 20
+
+
+def query_name(name, verbose=False):
+ """
+ Query NED by source name.
+ """
+ try:
+ q = Ned.query_object(name)
+ objname = q["Object Name"][0].decode("utf-8")
+ objtype = q["Type"][0].decode("utf-8")
+ ra = q["RA(deg)"][0]
+ dec = q["DEC(deg)"][0]
+ velocity = q["Velocity"][0]
+ z = q["Redshift"][0]
+ z_flag = q["Redshift Flag"][0].decode("utf-8")
+ refs = q["References"][0]
+ notes = q["Notes"][0]
+ if verbose:
+ print("%s: %s,%s,%s,%s,%s,%s,%s,%s,%s" %
+ (name, objname, objtype, ra, dec, velocity, z, z_flag,
+ refs, notes),
+ file=sys.stderr)
+    except RemoteServiceError:
+ objname = None
+ objtype = None
+ ra = None
+ dec = None
+ velocity = None
+ z = None
+ z_flag = None
+ refs = None
+ notes = None
+ if verbose:
+ print("*** %s: not found ***" % name, file=sys.stderr)
+ #
+ results = OrderedDict([
+ ("Name", name),
+ ("NED_Name", objname),
+ ("Type", objtype),
+ ("RA", ra),
+ ("DEC", dec),
+ ("Velocity", velocity),
+ ("z", z),
+ ("z_Flag", z_flag),
+ ("References", refs),
+ ("Notes", notes),
+ ])
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Query NED database by source name")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__,
+ __date__))
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true",
+ help="show verbose information")
+ parser.add_argument("-i", "--input", dest="input", required=True,
+ help="source names to be queried (sep by comma); " +
+ "or a file contains the names (one per line)")
+ parser.add_argument("-o", "--output", dest="output", default=sys.stdout,
+ help="output CSV file with queried data")
+ args = parser.parse_args()
+
+ try:
+ names = map(str.strip, open(args.input).readlines())
+ except FileNotFoundError:
+ names = map(str.strip, args.input.split(","))
+
+ results_list = []
+
+ for name in names:
+ qr = query_name(name, verbose=args.verbose)
+ results_list.append(qr)
+
+ try:
+ of = open(args.output, "w")
+ except TypeError:
+ of = args.output
+ writer = csv.writer(of)
+ writer.writerow(results_list[0].keys())
+ for res in results_list:
+ writer.writerow(res.values())
+ if of is not sys.stdout:
+ of.close()
+
+
+if __name__ == "__main__":
+ main()
+
+
+# vim: set ts=4 sw=4 tw=0 fenc=utf-8 ft=python: #
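
A quick interactive check of the underlying astroquery call (the column
names follow the astroquery version assumed by this script; newer
releases may rename them, e.g., "RA" instead of "RA(deg)"):

    from astroquery.ned import Ned
    q = Ned.query_object("NGC 4486")
    print(q["Object Name"][0], q["RA(deg)"][0], q["DEC(deg)"][0],
          q["Redshift"][0])
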
diff --git a/astro/query_simbad.py b/astro/query_simbad.py
new file mode 100755
index 0000000..4e7ccd7
--- /dev/null
+++ b/astro/query_simbad.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# NOTE:
+# * SimbadClass
+# https://astroquery.readthedocs.org/en/latest/api/astroquery.simbad.SimbadClass.html
+# * All available VOTable fields:
+# http://simbad.u-strasbg.fr/simbad/sim-help?Page=sim-fscript#VotableFields
+#
+# ChangeLog:
+# 2016-01-14:
+# * Add 'z_value'
+#
+# TODO:
+# * allow to query by coordinates & radius range
+#   * filter queried results according to the type/other...
+#   * if not queried by name, then try query by coordinates
+#
+
+"""
+Query SIMBAD with the provided name or coordinate.
+http://simbad.u-strasbg.fr/simbad/
+"""
+
+__version__ = "0.1.1"
+__date__ = "2016-01-14"
+
+
+import sys
+import argparse
+import csv
+
+from astroquery.simbad import Simbad
+from astropy import coordinates
+import astropy.units as u
+
+
+## Simbad configurations
+Simbad.ROW_LIMIT = 30
+Simbad.TIMEOUT = 20
+
+## Add query items/fields
+# otype: standard name of the object type
+# rv_value: Radial velocity value. Eventually translated from a redshift
+# z_value: Redshift value. Eventually translated from a radial velocity
+# rvz_qual: Quality code (A: best, .., E: worst)
+# rvz_type: stored type of velocity: 'v'=radial velocity, 'z'=redshift
+Simbad.reset_votable_fields()
+Simbad.add_votable_fields('otype', 'rv_value', 'z_value', 'rvz_qual', 'rvz_type')
+
+
+def query_name(name, verbose=False):
+ """
+ Query SIMBAD by name.
+ """
+ q = Simbad.query_object(name)
+ try:
+ main_id = str(q['MAIN_ID'][0], encoding='utf-8')
+ otype = str(q['OTYPE'][0], encoding='utf-8')
+ rv = q['RV_VALUE'][0]
+ z = q['Z_VALUE'][0]
+ rvz_qual = q['RVZ_QUAL'][0]
+ rvz_type = q['RVZ_TYPE'][0]
+ if verbose:
+ print('%s: %s,%s,%s,%s,%s,%s' % (name, main_id, otype, rv, z,
+ rvz_qual, rvz_type))
+    except (TypeError, KeyError):
+ main_id = ''
+ otype = ''
+ rv = ''
+ z = ''
+ rvz_qual = ''
+ rvz_type = ''
+ if verbose:
+ print('*** %s: not found ***' % name, file=sys.stderr)
+ #
+ results = {
+ 'main_id': main_id,
+ 'otype': otype,
+ 'rv': rv,
+ 'z': z,
+ 'rvz_qual': rvz_qual,
+ 'rvz_type': rvz_type,
+ }
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Query SIMBAD ...")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("infile",
+ help="file contains list of names; one per line")
+ parser.add_argument("outfile",
+ help="output with queryed data, empty if not found; CSV format")
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", help="show verbose information")
+ args = parser.parse_args()
+
+ name_list = []
+ main_id_list = []
+ otype_list = []
+ rv_list = []
+ z_list = []
+ rvz_qual_list = []
+ rvz_type_list = []
+
+ with open(args.infile) as f:
+ for name in f:
+ name = str.strip(name)
+ name_list.append(name)
+ qr = query_name(name, verbose=args.verbose)
+ main_id_list.append(qr['main_id'])
+ otype_list.append(qr['otype'])
+ rv_list.append(qr['rv'])
+ z_list.append(qr['z'])
+ rvz_qual_list.append(qr['rvz_qual'])
+ rvz_type_list.append(qr['rvz_type'])
+
+ with open(args.outfile, 'w') as of:
+ writer = csv.writer(of)
+ writer.writerow([ "Name", "SIMBAD_ID", "Type",
+ "RV", "z", "RV/z_Quality", "RV/z_Type" ])
+ for i in range(len(name_list)):
+ writer.writerow([ name_list[i],
+ main_id_list[i],
+ otype_list[i],
+ rv_list[i],
+ z_list[i],
+ rvz_qual_list[i],
+ rvz_type_list[i] ])
+
+
+if __name__ == "__main__":
+ main()
+
+
+# vim: set ts=4 sw=4 tw=0 fenc=utf-8 ft=python: #
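
A corresponding interactive check for the SIMBAD query (field names as
configured above; "M 87" is an arbitrary test object):

    from astroquery.simbad import Simbad
    Simbad.reset_votable_fields()
    Simbad.add_votable_fields('otype', 'rv_value', 'z_value',
                              'rvz_qual', 'rvz_type')
    q = Simbad.query_object("M 87")
    print(q['MAIN_ID'][0], q['OTYPE'][0], q['Z_VALUE'][0])
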