author    Aaron LI <aaronly.me@gmail.com>  2016-01-18 13:37:06 +0800
committer Aaron LI <aaronly.me@gmail.com>  2016-01-18 13:37:06 +0800
commit    40d0ae7de95689c5a4b5288a832675b788523c90
tree      aeb0b63214e4fe15fac664e6537cd8a68d0021a6
parent    5c58e9e57523479aa55717b68cc43550ddb2dae6
Add lc_clean.py, query_ned.py, query_simbad.py
* lc_clean.py: iteratively clip the lightcurve and make a TXT-format
  GTI file for use in xselect (for Suzaku data filtering)
* query_ned.py, query_simbad.py: query NED/SIMBAD using 'astroquery'
  by supplying the source name
-rwxr-xr-x  astro/lc_clean.py      151
-rwxr-xr-x  astro/query_ned.py     133
-rwxr-xr-x  astro/query_simbad.py  139
3 files changed, 423 insertions(+), 0 deletions(-)
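
The core of lc_clean.py below is plain iterative sigma clipping; as a
distilled, self-contained sketch of the loop it implements (not part of
the commit itself):

    import numpy as np

    def sigma_clip(rate, nsigma=3.0, maxiter=10):
        """Boolean mask of the time bins kept after iterative clipping."""
        keep = np.ones(rate.shape, dtype=bool)
        for _ in range(maxiter):
            mean = rate[keep].mean()
            sigma = rate[keep].std()
            new_keep = np.abs(rate - mean) <= nsigma * sigma
            if new_keep.sum() == keep.sum():
                break  # no bins changed in this round: converged
            keep = new_keep
        return keep
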
diff --git a/astro/lc_clean.py b/astro/lc_clean.py
new file mode 100755
index 0000000..a0fdc7b
--- /dev/null
+++ b/astro/lc_clean.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# Created: 2016-01-16
+# Updated: 2016-01-16
+#
+
+"""
+Clean the lightcurve by iterative sigma clipping: assuming Gaussian rates,
+discard the time bins with RATE beyond [mean-n*sigma, mean+n*sigma].
+"""
+
+__version__ = "0.1.0"
+__date__ = "2016-01-16"
+
+import sys
+import argparse
+
+from astropy.io import fits
+import numpy as np
+
+
+class LightCurve:
+ """
+ X-ray data light curve class
+ """
+ def __init__(self, lcfile):
+ f = fits.open(lcfile)
+ self.lc_data = f[1].data
+ self.lc_header = f[1].header
+ self.time = self.lc_data['TIME']
+ self.rate = self.lc_data['RATE']
+ self.rate_err = self.lc_data['ERROR']
+ self.TSTART = self.lc_header['TSTART']
+ self.TSTOP = self.lc_header['TSTOP']
+ self.TIMEDEL = self.lc_header['TIMEDEL']
+ self.TIMEPIXR = self.lc_header['TIMEPIXR']
+ f.close()
+
+ def sigma_clip(self, nsigma=3, maxiter=10):
+ """
+        Iteratively clip the time bins whose values lie beyond the
+ range [mean-n*sigma, mean+n*sigma].
+ """
+ rate = self.rate
+ keep_idx = np.ones(rate.shape, dtype=bool) # all True's
+ keep_num = np.sum(keep_idx)
+ keep_num0 = np.inf
+ i = 0
+ while (keep_num < keep_num0):
+ if (i >= maxiter):
+ print("WARNING: maximum iteration limit reached",
+ file=sys.stderr)
+ break
+ keep_num0 = keep_num
+ i += 1
+ mean = np.mean(rate[keep_idx])
+ sigma = np.std(rate[keep_idx])
+ cut_low = mean - nsigma * sigma
+ cut_high = mean + nsigma * sigma
+ keep_idx = np.logical_and((rate >= cut_low), (rate <= cut_high))
+ keep_num = np.sum(keep_idx)
+ # save clip results
+ self.niter = i
+ self.keep_idx = keep_idx
+ self.time_clipped = self.time[keep_idx]
+ self.rate_clipped = self.rate[keep_idx]
+
+ def make_gti(self, apply_header=True):
+ """
+        Make new GTIs (good time intervals) from the time bins kept
+        after clipping; apply_header=True shifts times by header TSTART.
+ """
+        frac = 0.01  # fraction of TIMEDEL used to decide bin contiguity
+ gti_start = []
+ gti_stop = []
+ time_start = self.time_clipped
+ time_stop = time_start + self.TIMEDEL
+ # first GTI start time
+ gti_start.append(time_start[0])
+ for tstart, tstop in zip(time_start[1:], time_stop[:-1]):
+            if (np.abs(tstart-tstop) <= frac * self.TIMEDEL):
+                # contiguous bins: the current GTI continues
+                continue
+            else:
+                # gap found: a new GTI starts here
+ gti_start.append(tstart)
+ gti_stop.append(tstop)
+ # last GTI stop time
+ gti_stop.append(time_stop[-1])
+ # convert to numpy array
+ gti_start = np.array(gti_start)
+ gti_stop = np.array(gti_stop)
+ if apply_header:
+ # add TSTART to the time
+ gti_start += self.TSTART
+ gti_stop += self.TSTART
+ # save results
+ self.gti_start = gti_start
+ self.gti_stop = gti_stop
+
+ def write_gti(self, filename=None, header=True):
+ """
+ Write generated GTIs to file or screen (default)
+ """
+ if isinstance(filename, str):
+ outfile = open(filename, 'w')
+ else:
+ outfile = sys.stdout
+ #
+ if header:
+ outfile.write('# TSTART\tTSTOP\n')
+        outfile.write('\n'.join('%s\t%s' % (tstart, tstop) for tstart, tstop
+                                in zip(self.gti_start, self.gti_stop)) + '\n')
+ #
+ if isinstance(filename, str):
+ outfile.close()
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Clean light curve by sigma clipping")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("infile",
+ help="input lightcurve file; contains [TIME, RATE] columns")
+ parser.add_argument("outfile", nargs='?', default=None,
+ help="output text-format GTI file; for XSELECT filter time")
+ parser.add_argument("-s", "--nsigma", dest="nsigma", type=float,
+        default=2.0, help="sigma clipping significance level (default: 2.0)")
+ parser.add_argument("-H", "--no-header", dest="noheader",
+ action="store_true", help="not write header to the output file")
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", help="show verbose information")
+ args = parser.parse_args()
+
+ lc = LightCurve(args.infile)
+ lc.sigma_clip(nsigma=args.nsigma)
+ lc.make_gti(apply_header=True)
+ lc.write_gti(filename=args.outfile, header=(not args.noheader))
+ if args.verbose:
+ exposure = np.sum(lc.gti_stop - lc.gti_start)
+ print("# Total GTI: %.2f (s)" % exposure)
+
+
+if __name__ == "__main__":
+ main()
+
+
+# vim: set ts=4 sw=4 tw=0 fenc= ft=python: #
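
A minimal usage sketch of the LightCurve class above (the file names are
hypothetical):

    from lc_clean import LightCurve

    lc = LightCurve("src.lc")       # FITS lightcurve with TIME/RATE/ERROR
    lc.sigma_clip(nsigma=2.0)       # iteratively clip outlier time bins
    lc.make_gti(apply_header=True)  # build GTIs, shifted by header TSTART
    lc.write_gti("src_gti.txt")     # TSTART/TSTOP pairs for XSELECT
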
diff --git a/astro/query_ned.py b/astro/query_ned.py
new file mode 100755
index 0000000..feab14d
--- /dev/null
+++ b/astro/query_ned.py
@@ -0,0 +1,133 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# NOTE:
+# * NedClass
+# https://astroquery.readthedocs.org/en/latest/api/astroquery.ned.NedClass.html
+#
+# ChangeLog:
+#
+# TODO:
+# * allow querying by coordinates & a radius range
+# * filter the queried results according to the type or other criteria
+# * if the query by name fails, try querying by coordinates instead
+#
+
+"""
+Query NED with the provided name or coordinate.
+NASA/IPAC Extragalactic Database: http://ned.ipac.caltech.edu/
+"""
+
+__version__ = "0.1.0"
+__date__ = "2016-01-14"
+
+
+import sys
+import argparse
+import csv
+
+from astroquery.ned import Ned
+from astroquery.exceptions import RemoteServiceError
+from astropy import coordinates  # reserved for the planned coordinate query
+import astropy.units as u        # reserved for the planned coordinate query
+
+
+## Ned configurations
+Ned.TIMEOUT = 20
+
+
+def query_name(name, verbose=False):
+ """
+ Query NED by name.
+ """
+ try:
+ q = Ned.query_object(name)
+ objname = str(q['Object Name'][0], encoding='utf-8')
+ objtype = str(q['Type'][0], encoding='utf-8')
+ velocity = q['Velocity'][0]
+ z = q['Redshift'][0]
+ z_flag = str(q['Redshift Flag'][0], encoding='utf-8')
+ refs = q['References'][0]
+ notes = q['Notes'][0]
+ if verbose:
+ print('%s: %s,%s,%s,%s,%s,%s,%s' % (name, objname, objtype,
+ velocity, z, z_flag,
+ refs, notes))
+    except RemoteServiceError:  # raised by NED when the object is not found
+ objname = ''
+ objtype = ''
+ velocity = ''
+ z = ''
+ z_flag = ''
+ refs = ''
+ notes = ''
+ if verbose:
+ print('*** %s: not found ***' % name, file=sys.stderr)
+ #
+ results = {
+ 'objname': objname,
+ 'objtype': objtype,
+ 'velocity': velocity,
+ 'z': z,
+ 'z_flag': z_flag,
+ 'refs': refs,
+ 'notes': notes,
+ }
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Query NED ...")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("infile",
+ help="file contains list of names; one per line")
+ parser.add_argument("outfile",
+ help="output with queryed data, empty if not found; CSV format")
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", help="show verbose information")
+ args = parser.parse_args()
+
+ name_list = []
+ objname_list = []
+ objtype_list = []
+ velocity_list = []
+ z_list = []
+ z_flag_list = []
+ refs_list = []
+ notes_list = []
+
+ with open(args.infile) as f:
+ for name in f:
+            name = name.strip()
+ name_list.append(name)
+ qr = query_name(name, verbose=args.verbose)
+ objname_list.append(qr['objname'])
+ objtype_list.append(qr['objtype'])
+ velocity_list.append(qr['velocity'])
+ z_list.append(qr['z'])
+ z_flag_list.append(qr['z_flag'])
+ refs_list.append(qr['refs'])
+ notes_list.append(qr['notes'])
+
+ with open(args.outfile, 'w') as of:
+ writer = csv.writer(of)
+ writer.writerow([ "Name", "NED_Name", "Type", "Velocity",
+ "z", "z_Flag", "References", "Notes" ])
+ for i in range(len(name_list)):
+ writer.writerow([ name_list[i],
+ objname_list[i],
+ objtype_list[i],
+ velocity_list[i],
+ z_list[i],
+ z_flag_list[i],
+ refs_list[i],
+ notes_list[i] ])
+
+
+if __name__ == "__main__":
+ main()
+
+
+# vim: set ts=4 sw=4 tw=0 fenc=utf-8 ft=python: #
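
A minimal usage sketch of query_name() above (the object name is only an
example):

    from query_ned import query_name

    qr = query_name("NGC 1275", verbose=True)
    print(qr['objname'], qr['objtype'], qr['z'])  # empty strings if not found
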
diff --git a/astro/query_simbad.py b/astro/query_simbad.py
new file mode 100755
index 0000000..4e7ccd7
--- /dev/null
+++ b/astro/query_simbad.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# NOTE:
+# * SimbadClass
+# https://astroquery.readthedocs.org/en/latest/api/astroquery.simbad.SimbadClass.html
+# * All available VOTable fields:
+# http://simbad.u-strasbg.fr/simbad/sim-help?Page=sim-fscript#VotableFields
+#
+# ChangeLog:
+# 2016-01-14:
+# * Add 'z_value'
+#
+# TODO:
+# * allow querying by coordinates & a radius range
+# * filter the queried results according to the type or other criteria
+# * if the query by name fails, try querying by coordinates instead
+#
+
+"""
+Query SIMBAD with the provided name or coordinate.
+http://simbad.u-strasbg.fr/simbad/
+"""
+
+__version__ = "0.1.1"
+__date__ = "2016-01-14"
+
+
+import sys
+import argparse
+import csv
+
+from astroquery.simbad import Simbad
+from astropy import coordinates  # reserved for the planned coordinate query
+import astropy.units as u        # reserved for the planned coordinate query
+
+
+## Simbad configurations
+Simbad.ROW_LIMIT = 30
+Simbad.TIMEOUT = 20
+
+## Add query items/fields
+# otype: standard name of the object type
+# rv_value: radial velocity value, possibly converted from a redshift
+# z_value: redshift value, possibly converted from a radial velocity
+# rvz_qual: quality code (A: best, ..., E: worst)
+# rvz_type: stored type of velocity: 'v'=radial velocity, 'z'=redshift
+Simbad.reset_votable_fields()
+Simbad.add_votable_fields('otype', 'rv_value', 'z_value', 'rvz_qual', 'rvz_type')
+
+
+def query_name(name, verbose=False):
+ """
+ Query SIMBAD by name.
+ """
+ q = Simbad.query_object(name)
+ try:
+ main_id = str(q['MAIN_ID'][0], encoding='utf-8')
+ otype = str(q['OTYPE'][0], encoding='utf-8')
+ rv = q['RV_VALUE'][0]
+ z = q['Z_VALUE'][0]
+ rvz_qual = q['RVZ_QUAL'][0]
+ rvz_type = q['RVZ_TYPE'][0]
+ if verbose:
+ print('%s: %s,%s,%s,%s,%s,%s' % (name, main_id, otype, rv, z,
+ rvz_qual, rvz_type))
+    except (TypeError, KeyError):  # query_object() returns None if not found
+ main_id = ''
+ otype = ''
+ rv = ''
+ z = ''
+ rvz_qual = ''
+ rvz_type = ''
+ if verbose:
+ print('*** %s: not found ***' % name, file=sys.stderr)
+ #
+ results = {
+ 'main_id': main_id,
+ 'otype': otype,
+ 'rv': rv,
+ 'z': z,
+ 'rvz_qual': rvz_qual,
+ 'rvz_type': rvz_type,
+ }
+ return results
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Query SIMBAD ...")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("infile",
+ help="file contains list of names; one per line")
+ parser.add_argument("outfile",
+ help="output with queryed data, empty if not found; CSV format")
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ action="store_true", help="show verbose information")
+ args = parser.parse_args()
+
+ name_list = []
+ main_id_list = []
+ otype_list = []
+ rv_list = []
+ z_list = []
+ rvz_qual_list = []
+ rvz_type_list = []
+
+ with open(args.infile) as f:
+ for name in f:
+            name = name.strip()
+ name_list.append(name)
+ qr = query_name(name, verbose=args.verbose)
+ main_id_list.append(qr['main_id'])
+ otype_list.append(qr['otype'])
+ rv_list.append(qr['rv'])
+ z_list.append(qr['z'])
+ rvz_qual_list.append(qr['rvz_qual'])
+ rvz_type_list.append(qr['rvz_type'])
+
+ with open(args.outfile, 'w') as of:
+ writer = csv.writer(of)
+ writer.writerow([ "Name", "SIMBAD_ID", "Type",
+ "RV", "z", "RV/z_Quality", "RV/z_Type" ])
+ for i in range(len(name_list)):
+ writer.writerow([ name_list[i],
+ main_id_list[i],
+ otype_list[i],
+ rv_list[i],
+ z_list[i],
+ rvz_qual_list[i],
+ rvz_type_list[i] ])
+
+
+if __name__ == "__main__":
+ main()
+
+
+# vim: set ts=4 sw=4 tw=0 fenc=utf-8 ft=python: #
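
And the matching sketch for query_simbad.py (again, the object name is only
illustrative):

    from query_simbad import query_name

    qr = query_name("M 87", verbose=True)
    print(qr['main_id'], qr['otype'], qr['z'])  # empty strings if not found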