author     Aaron LI <aaronly.me@gmail.com>   2016-03-31 10:57:34 +0800
committer  Aaron LI <aaronly.me@gmail.com>   2016-03-31 10:57:34 +0800
commit     c9c896dea2ba43551c4e10bd49666105449e9bd7 (patch)
tree       e94b73f17b2d776c2acd4c9549657f500c3dc7ce
parent     2b6cb9b655a53d43b32a8a211287c82f4f59999a (diff)
download   atoolbox-c9c896dea2ba43551c4e10bd49666105449e9bd7.tar.bz2
add all scripts/tools
-rw-r--r--  .gitignore                        |   3
-rwxr-xr-x  astro/add_xflt.py                 | 159
-rwxr-xr-x  audio/ape2id3.py                  | 145
-rwxr-xr-x  audio/m4a2mp3.sh                  |   8
-rwxr-xr-x  audio/split2flac                  | 752
-rwxr-xr-x  audio/wma2mp3.sh                  |  20
-rwxr-xr-x  bin/gen_points.py                 |  81
-rw-r--r--  bin/img2list.py                   |  28
-rwxr-xr-x  cli/colors.sh                     |  37
-rwxr-xr-x  cli/colortest.bash                |  39
-rwxr-xr-x  cli/colortest.lua                 |  22
-rwxr-xr-x  cli/colortest.pl                  | 365
-rwxr-xr-x  cli/colortest.py                  |  18
-rwxr-xr-x  cli/colortest.rb                  |  26
-rwxr-xr-x  cli/colortest.sh                  |  53
-rwxr-xr-x  cli/csv2json.py                   |  67
-rwxr-xr-x  cli/jpegs2pdf.sh                  |  42
-rwxr-xr-x  cli/pdfmerge.sh                   |  23
-rwxr-xr-x  cli/shrinkpdf.sh                  |  56
-rwxr-xr-x  cli/term_color.sh                 |  28
-rwxr-xr-x  cli/term_color_2.sh               |  32
-rwxr-xr-x  cli/term_color_3.sh               |  33
-rwxr-xr-x  cli/unzip-gbk.py                  |  26
-rwxr-xr-x  cli/vimpager                      |  85
-rw-r--r--  cluster/kMeans.py                 |  76
-rw-r--r--  img/force_field_transform.jl      | 135
-rw-r--r--  img/force_field_transform.py      | 126
-rw-r--r--  img/force_field_transform_fft.jl  |  29
-rw-r--r--  img/forcefield.jl                 |  87
-rw-r--r--  julia/anisodiff.jl                | 101
-rw-r--r--  julia/calc_k_percentile.jl        |  32
-rw-r--r--  julia/forcefield.jl               |  87
-rw-r--r--  julia/ndgrid.jl                   |  52
-rw-r--r--  julia/scharr.jl                   |  37
-rw-r--r--  matlab/radialpsd.m                |  83
-rw-r--r--  r/chisq.R                         |  21
-rw-r--r--  r/fillpois.R                      | 130
-rw-r--r--  r/fitdistrplus-example.R          |  54
-rw-r--r--  r/forceFieldTransform.R           |  46
-rw-r--r--  r/kldiv.R                         | 349
-rw-r--r--  r/lsos.R                          |  45
-rw-r--r--  r/scaleSaliency.R                 | 246
-rw-r--r--  rand/luminosity_func.py           |  96
-rw-r--r--  rand/pointsrc_coord.py            |  98
-rw-r--r--  rand/sphere.py                    |  57
-rw-r--r--  region/region.py                  |  78
46 files changed, 4212 insertions(+), 1 deletion(-)
diff --git a/.gitignore b/.gitignore
index f3a7200..c3edc65 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,5 @@
# .gitignore
-#
+#
*~
*.swp
@@ -7,6 +7,7 @@
*_bak
# python
+__init__.py
*.pyc
**/__pycache__
diff --git a/astro/add_xflt.py b/astro/add_xflt.py
new file mode 100755
index 0000000..8a718e6
--- /dev/null
+++ b/astro/add_xflt.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/06/16
+
+"""
+Add XFLT#### keywords to the spectrum header according to the provided
+region, in order to employ the "PROJCT" model in XSPEC.
+"""
+
+__version__ = "0.1.0"
+__date__ = "2015-11-14"
+
+import sys
+import argparse
+import subprocess
+import re
+import os
+from collections import OrderedDict
+
+
+def parse_region(regstr):
+ """
+ Parse the given region string into one of the following 4 cases:
+ 1. annulus
+ 2. pie (cxc)
+ 3. pie + annulus (ftools/xmm)
+ 4. other
+
+ For the first 3 cases, the return is a dictionary like:
+ { 'shape': 'pie', 'xc': 55, 'yc': 89,
+ 'radius_in': 50, 'radius_out': 100,
+ 'angle_begin': 30, 'angle_end': 120 }
+ Otherwise, return None.
+ """
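+    # Illustrative inputs (coordinates are hypothetical):
+    #   case 1: "annulus(4010.5,4056.5,50,100)"
+    #   case 2: "pie(4010.5,4056.5,50,100,30,120)"
+    #   case 3: "pie(4010.5,4056.5,30,120)" combined with the annulus above
+    #           in one string, as FTOOLS/XMM-style regions express it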
+ re_annulus = re.compile(r'^.*(?P<shape>annulus)\(\s*(?P<xc>[\d.-]+)\s*,\s*(?P<yc>[\d.-]+)\s*,\s*(?P<radius_in>[\d.-]+)\s*,\s*(?P<radius_out>[\d.-]+)\s*\).*$', re.I)
+ re_pie_cxc = re.compile(r'^.*(?P<shape>pie)\(\s*(?P<xc>[\d.-]+)\s*,\s*(?P<yc>[\d.-]+)\s*,\s*(?P<radius_in>[\d.-]+)\s*,\s*(?P<radius_out>[\d.-]+)\s*,\s*(?P<angle_begin>[\d.-]+)\s*,\s*(?P<angle_end>[\d.-]+)\s*\).*$', re.I)
+ re_pie_ft = re.compile(r'^.*(?P<shape>pie)\(\s*(?P<xc>[\d.-]+)\s*,\s*(?P<yc>[\d.-]+)\s*,\s*(?P<angle_begin>[\d.-]+)\s*,\s*(?P<angle_end>[\d.-]+)\s*\).*$', re.I)
+ m_annulus = re_annulus.match(regstr)
+ m_pie_cxc = re_pie_cxc.match(regstr)
+ m_pie_ft = re_pie_ft.match(regstr)
+ if m_pie_cxc is not None:
+ # case 2: pie (cxc)
+ region = OrderedDict([
+ ('shape', m_pie_cxc.group('shape').lower()),
+ ('xc', float(m_pie_cxc.group('xc'))),
+ ('yc', float(m_pie_cxc.group('yc'))),
+ ('radius_in', float(m_pie_cxc.group('radius_in'))),
+ ('radius_out', float(m_pie_cxc.group('radius_out'))),
+ ('angle_begin', float(m_pie_cxc.group('angle_begin'))),
+ ('angle_end', float(m_pie_cxc.group('angle_end')))
+ ])
+ elif m_pie_ft is not None:
+ # pie (ftools/xmm)
+ if m_annulus is not None:
+ # case 3: pie + annulus (ftools/xmm)
+ region = OrderedDict([
+ ('shape', m_pie_ft.group('shape').lower()),
+ ('xc', float(m_pie_ft.group('xc'))),
+ ('yc', float(m_pie_ft.group('yc'))),
+ ('radius_in', float(m_annulus.group('radius_in'))),
+ ('radius_out', float(m_annulus.group('radius_out'))),
+ ('angle_begin', float(m_pie_ft.group('angle_begin'))),
+ ('angle_end', float(m_pie_ft.group('angle_end')))
+ ])
+ else:
+ region = None
+ elif m_annulus is not None:
+ # case 1: annulus
+ region = OrderedDict([
+ ('shape', m_annulus.group('shape').lower()),
+ ('xc', float(m_annulus.group('xc'))),
+ ('yc', float(m_annulus.group('yc'))),
+ ('radius_in', float(m_annulus.group('radius_in'))),
+ ('radius_out', float(m_annulus.group('radius_out')))
+ ])
+ else:
+ region = None
+ return region
+
+
+def make_xflt(region):
+ """
+ Make a dictionary for the XFLT#### keywords and values according
+ to the provided region.
+
+ Return:
+ a dictionary containing the XFLT#### keywords and values, e.g.,
+ { 'XFLT0001': radius_out, 'XFLT0002': radius_out, 'XFLT0003': 0,
+ 'XFLT0004': angle_begin, 'XFLT0005': angle_end }
+ """
+ if region.get('shape') == 'annulus':
+ xflt = OrderedDict([
+ ('XFLT0001', region.get('radius_out')),
+ ('XFLT0002', region.get('radius_out')),
+ ('XFLT0003', 0)
+ ])
+ elif region.get('shape') == 'pie':
+ xflt = OrderedDict([
+ ('XFLT0001', region.get('radius_out')),
+ ('XFLT0002', region.get('radius_out')),
+ ('XFLT0003', 0),
+ ('XFLT0004', region.get('angle_begin')),
+ ('XFLT0005', region.get('angle_end'))
+ ])
+ else:
+ xflt = None
+ return xflt
+
+
+def add_xflt(fitsfile, xflt):
+ """
+ Add XFLT#### keywords to the given FITS file.
+ """
+ if xflt is not None:
+ for key, val in xflt.items():
+ cmd = 'fthedit "%(file)s+1" keyword="%(key)s" operation=add value="%(val)s"' % \
+ {'file': fitsfile, 'key': key, 'val': val}
+ print("CMD: %s" % cmd, file=sys.stderr)
+ subprocess.call(cmd, shell=True)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Add XFLT???? keywords to spectrum header")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+    parser.add_argument("spectrum", help="input spectrum ('@file' for a stack)")
+    parser.add_argument("region", help="extraction region of this spectrum ('@file' for a stack)")
+ parser.add_argument("arcmin2pix", nargs='?', help="1 arcmin = ? pixel",
+ default=1.0, type=float)
+ args = parser.parse_args()
+
+ if args.spectrum[0] == '@' and args.region[0] == '@':
+ spectrum = map(str.strip, open(args.spectrum[1:]).readlines())
+ regionstr = map(str.strip, open(args.region[1:]).readlines())
+ else:
+ spectrum = [ args.spectrum ]
+ regionstr = [ args.region ]
+
+ for spec, reg in zip(spectrum, regionstr):
+ print("SPECTRUM: '%s'" % spec)
+ print("REGION: '%s'" % reg)
+ region = parse_region(reg)
+ if region is None:
+ print("ERROR: invalid region %s" % reg, file=sys.stderr)
+ sys.exit(11)
+ else:
+ # Convert pixel to arcmin
+ region['radius_in'] = region['radius_in'] / args.arcmin2pix
+ region['radius_out'] = region['radius_out'] / args.arcmin2pix
+ xflt = make_xflt(region)
+ add_xflt(spec, xflt)
+
+
+if __name__ == "__main__":
+ main()
+
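+# Example invocation (names and pixel scale are illustrative; an argument
+# starting with '@' names a stack file listing one entry per line; the scale
+# is pixels per arcmin, e.g. ~121.95 for Chandra ACIS at 0.492"/pixel):
+#   ./add_xflt.py sou_1.pi 'pie(4010.5,4056.5,50,100,30,120)' 121.95
+#   ./add_xflt.py @spectra.lst @regions.lst 121.95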
diff --git a/audio/ape2id3.py b/audio/ape2id3.py
new file mode 100755
index 0000000..8651a2f
--- /dev/null
+++ b/audio/ape2id3.py
@@ -0,0 +1,145 @@
+#! /usr/bin/env python
+import sys
+from optparse import OptionParser
+import mutagen
+from mutagen.apev2 import APEv2
+from mutagen.id3 import ID3, TXXX
+
+def convert_gain(gain):
+ if gain[-3:] == " dB":
+ gain = gain[:-3]
+ try:
+ gain = float(gain)
+ except ValueError:
+        raise ValueError("invalid gain value")
+ return "%.2f dB" % gain
+def convert_peak(peak):
+ try:
+ peak = float(peak)
+ except ValueError:
+        raise ValueError("invalid peak value")
+ return "%.6f" % peak
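+# e.g. convert_gain("-5.27 dB") -> "-5.27 dB"; convert_gain("+2.5") -> "2.50 dB";
+# convert_peak("0.987654") -> "0.987654" (values are illustrative)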
+
+REPLAYGAIN_TAGS = (
+ ("mp3gain_album_minmax", None),
+ ("mp3gain_minmax", None),
+ ("replaygain_album_gain", convert_gain),
+ ("replaygain_album_peak", convert_peak),
+ ("replaygain_track_gain", convert_gain),
+ ("replaygain_track_peak", convert_peak),
+)
+
+
+class Logger(object):
+ def __init__(self, log_level, prog_name):
+ self.log_level = log_level
+ self.prog_name = prog_name
+ self.filename = None
+ def prefix(self, msg):
+ if self.filename is None:
+ return msg
+ return "%s: %s" % (self.filename, msg)
+ def debug(self, msg):
+ if self.log_level >= 4:
+ print self.prefix(msg)
+ def info(self, msg):
+ if self.log_level >= 3:
+ print self.prefix(msg)
+ def warning(self, msg):
+ if self.log_level >= 2:
+ print self.prefix("WARNING: %s" % msg)
+ def error(self, msg):
+ if self.log_level >= 1:
+ sys.stderr.write("%s: %s\n" % (self.prog_name, msg))
+ def critical(self, msg, retval=1):
+ self.error(msg)
+ sys.exit(retval)
+
+class Ape2Id3(object):
+ def __init__(self, logger, force=False):
+ self.log = logger
+ self.force = force
+ def convert_tag(self, name, value):
+ pass
+ def copy_replaygain_tag(self, apev2, id3, name, converter=None):
+ self.log.debug("processing '%s' tag" % name)
+ if name not in apev2:
+ self.log.info("no APEv2 '%s' tag found, skipping tag" % name)
+ return False
+ if not self.force and ("TXXX:%s" % name) in id3:
+            self.log.info("ID3 '%s' tag already exists, skipping tag" % name)
+ return False
+ value = str(apev2[name])
+ if callable(converter):
+ self.log.debug("converting APEv2 '%s' tag from '%s'" %
+ (name, value))
+ try:
+ value = converter(value)
+ except ValueError:
+ self.log.warning("invalid value for APEv2 '%s' tag" % name)
+ return False
+ self.log.debug("converted APEv2 '%s' tag to '%s'" % (name, value))
+ id3.add(TXXX(encoding=1, desc=name, text=value))
+ self.log.info("added ID3 '%s' tag with value '%s'" % (name, value))
+ return True
+ def copy_replaygain_tags(self, filename):
+ self.log.filename = filename
+ self.log.debug("begin processing file")
+ try:
+ apev2 = APEv2(filename)
+ except mutagen.apev2.error:
+ self.log.info("no APEv2 tag found, skipping file")
+ return
+ except IOError:
+ e = sys.exc_info()
+ self.log.error("%s" % e[1])
+ return
+ try:
+ id3 = ID3(filename)
+ except mutagen.id3.error:
+ self.log.info("no ID3 tag found, creating one")
+ id3 = ID3()
+ modified = False
+ for name, converter in REPLAYGAIN_TAGS:
+ copied = self.copy_replaygain_tag(apev2, id3, name, converter)
+ if copied:
+ modified = True
+ if modified:
+ self.log.debug("saving modified ID3 tag")
+ id3.save(filename)
+ self.log.debug("done processing file")
+ self.log.filename = None
+
+def main(prog_name, options, args):
+ logger = Logger(options.log_level, prog_name)
+ ape2id3 = Ape2Id3(logger, force=options.force)
+ for filename in args:
+ ape2id3.copy_replaygain_tags(filename)
+
+if __name__ == "__main__":
+ parser = OptionParser(version="0.1", usage="%prog [OPTION]... FILE...",
+ description="Copy APEv2 ReplayGain tags on "
+ "FILE(s) to ID3v2.")
+ parser.add_option("-q", "--quiet", dest="log_level",
+ action="store_const", const=0, default=1,
+ help="do not output error messages")
+ parser.add_option("-v", "--verbose", dest="log_level",
+ action="store_const", const=3,
+ help="output warnings and informational messages")
+ parser.add_option("-d", "--debug", dest="log_level",
+ action="store_const", const=4,
+ help="output debug messages")
+ parser.add_option("-f", "--force", dest="force",
+ action="store_true", default=False,
+ help="force overwriting of existing ID3v2 "
+ "ReplayGain tags")
+ prog_name = parser.get_prog_name()
+ options, args = parser.parse_args()
+ if len(args) < 1:
+ parser.error("no files specified")
+ try:
+ main(prog_name, options, args)
+ except KeyboardInterrupt:
+ pass
+
+# vim: set expandtab shiftwidth=4 softtabstop=4 textwidth=79:
diff --git a/audio/m4a2mp3.sh b/audio/m4a2mp3.sh
new file mode 100755
index 0000000..5d06cd9
--- /dev/null
+++ b/audio/m4a2mp3.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+bitrate=192
+
+for i in *.m4a; do
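+    # "${i%m4a}mp3" strips the trailing "m4a" and appends "mp3" (foo.m4a -> foo.mp3)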
+ faad -o - "$i" | lame -h -b $bitrate - "${i%m4a}mp3"
+done
+
diff --git a/audio/split2flac b/audio/split2flac
new file mode 100755
index 0000000..6622262
--- /dev/null
+++ b/audio/split2flac
@@ -0,0 +1,752 @@
+#!/bin/sh
+# Copyright (c) 2009-2015 Serge "ftrvxmtrx" Ziryukin
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+# Dependencies:
+# shntool, cuetools
+# SPLIT: flac, wavpack, mac
+# CONVERT: flac/flake, faac, libmp4v2, id3lib/mutagen, lame, vorbis-tools
+# ART: ImageMagick
+# CHARSET: iconv, enca
+# GAIN: flac, aacgain, mp3gain, vorbisgain
+
+# Exit codes:
+# 0 - success
+# 1 - error in arguments
+# 2 - file or path is not accessible
+# 3 - something has failed
+
+[ -n "${XDG_CONFIG_HOME}" ] && CONFIG="${XDG_CONFIG_HOME}/split2flac/split2flac.conf"
+[ -r "${CONFIG}" ] || CONFIG="${HOME}/.split2flac"
+TMPPIC="${HOME}/.split2flac_cover.jpg"
+FAILED="split_failed.txt"
+
+NOSUBDIRS=0
+NOPIC=0
+REMOVE=0
+NOCOLORS=0
+PIC_SIZE="192x192"
+REPLAY_GAIN=0
+FORMAT="${0##*split2}"
+DIR="."
+OUTPATTERN="@artist/{@year - }@album/@track - @title.@ext"
+COPYMASKS="[Cc]overs \*.log \*.txt \*.jpg \*.cbr"
+COPYFILES=1
+ENCA_ARGS=""
+
+# codecs default arguments
+ENCARGS_flac="-8"
+ENCARGS_m4a="-q 500"
+ENCARGS_mp3="--preset standard"
+ENCARGS_ogg="-q 5"
+ENCARGS_wav=""
+
+# load settings
+eval $(cat "${CONFIG}" 2>/dev/null)
+DRY=0
+SAVE=0
+NASK=0
+unset PIC INPATH CUE CHARSET
+FORCE=0
+
+# do not forget to update before commit
+VERSION=121
+
+HELP="\${cG}split2flac version: ${VERSION}
+Splits one big \${cU}APE/FLAC/WV/WAV\$cZ\$cG audio image (or a collection) into \${cU}FLAC/M4A/MP3/OGG_VORBIS/WAV\$cZ\$cG tracks with tagging and renaming.
+
+Usage: \${cZ}split2\${FORMAT} [\${cU}OPTIONS\$cZ] \${cU}FILE\$cZ [\${cU}OPTIONS\$cZ]\$cZ
+ \${cZ}split2\${FORMAT} [\${cU}OPTIONS\$cZ] \${cU}DIR\$cZ [\${cU}OPTIONS\$cZ]\$cZ
+ \$cG-p\$cZ - dry run
+ \$cG-o \${cU}DIRECTORY\$cZ \$cR*\$cZ - set output directory (current is \$cP\${DIR}\$cZ)
+ \$cG-of \${cU}'PATTERN'\$cZ \$cR*\$cZ - use specific output naming pattern (current is \$cP'\${OUTPATTERN}'\$cZ)
+ \$cG-cue \${cU}FILE\$cZ - use file as a cue sheet (does not work with \${cU}DIR\$cZ)
+ \$cG-cuecharset \${cU}CHARSET\$cZ - convert cue sheet from CHARSET to UTF-8 (no conversion by default)
+ \$cG-nask\$cZ - do not ask to enter proper charset of a cue sheet (default is to ask)
+ \$cG-f \${cU}FORMAT\$cZ - use specific output format (current is \$cP\${FORMAT}\$cZ)
+ \$cG-e \${cU}'ARG1 ARG2'\$cZ \$cR*\$cZ - encoder arguments (current is \$cP'\${ENCARGS}'\$cZ)
+ \$cG-eh\$cZ - show help for current encoder and exit\$cZ
+ \$cG-enca \${cU}'ARG1 ARG2'\$cZ \$cR*\$cZ - enca additional arguments (current is \$cP'\${ENCA_ARGS}'\$cZ)
+ \$cG-c \${cU}FILE\$cZ \$cR*\$cZ - use file as a cover image (does not work with \${cU}DIR\$cZ)
+ \$cG-nc \${cR}*\$cZ - do not set any cover images
+ \$cG-C \${cU}MASKS\$cZ \$cR*\$cZ - specify wildcards for files to copy over (current is \$cP'\${COPYMASKS}'\$cZ)
+ \$cG-nC \${cR}*\$cZ - do not copy any files
+ \$cG-cs \${cU}WxH\$cZ \$cR*\$cZ - set cover image size (current is \$cP\${PIC_SIZE}\$cZ)
+ \$cG-d \$cR*\$cZ - create artist/album subdirs (default)
+ \$cG-nd \$cR*\$cZ - do not create any subdirs
+ \$cG-D \$cR*\$cZ - delete original file
+ \$cG-nD \$cR*\$cZ - do not remove the original (default)
+ \$cG-F\$cZ - force deletion without asking
+ \$cG-colors\$cZ \$cR*\$cZ - colorized output (default)
+ \$cG-nocolors\$cZ \$cR*\$cZ - turn off colors
+ \$cG-g\$cZ \$cR*\$cZ - adjust audio gain
+ \$cG-ng\$cZ \$cR*\$cZ - do not adjust audio gain (default)
+ \$cG-s\$cZ - save configuration to \$cP\"\${CONFIG}\"\$cZ
+ \$cG-h\$cZ - print this message
+ \$cG-v\$cZ - print version
+
+\$cR*\$cZ - option affects configuration if \$cP'-s'\$cZ option passed.
+\${cP}NOTE: \$cG'-c some_file.jpg -s'\$cP only \${cU}allows\$cZ\$cP cover images, it doesn't set a default one.
+\${cZ}Supported \$cU\${cG}FORMATs\${cZ}: flac, m4a, mp3, ogg, wav.
+Supported tags for \$cU\${cG}PATTERN\${cZ}: @artist, @album, @year, @track, @performer, @title, @genre, @ext.
+@performer tag is useful with 'various artists' albums, when you want to add
+each artist's name to the track filename. It works as @artist if track performer is undefined.
+Special \"underscored\" tags are also supported (@_artist, @_album, etc). If used, spaces will be replaced with
+underscores. It's useful if you want to have filenames without spaces.
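+Example (illustrative): with the default pattern above, track 1 of the 2009 album
+'Album' by 'Artist' comes out as 'Artist/2009 - Album/01 - Title.flac'; a braced
+group such as '{@year - }' is dropped entirely when its tag is empty.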
+
+It's better to pass the \$cP'-p'\$cZ option first to see what will happen when actually
+splitting tracks. You may want to pass the \$cP'-s'\$cZ option on the first run to save
+the default configuration (output dir, cover image size, etc.) so you won't need to
+pass a lot of options every time, just a filename. The script will try to find a CUE
+sheet if one wasn't specified. It also supports internal CUE sheets (FLAC, APE and WV).\n"
+
+msg="printf"
+
+emsg () {
+ $msg "${cR}$1${cZ}"
+}
+
+SKIP_UPDATE_ENCARGS=0
+
+update_encargs () {
+ if [ ${SKIP_UPDATE_ENCARGS} -eq 0 ]; then
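+        # indirect lookup of ENCARGS_<FORMAT> via eval (POSIX sh lacks bash's ${!var})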
+ e="\${ENCARGS_${FORMAT}}"
+ ENCARGS=`eval echo "$e"`
+ ENCHELP=0
+ fi
+}
+
+update_colors () {
+ if [ "${NOCOLORS}" -eq 0 ]; then
+ cR="\033[31m"
+ cG="\033[32m"
+ cC="\033[35m"
+ cP="\033[36m"
+ cU="\033[4m"
+ cZ="\033[0m"
+ else
+ unset cR cG cC cP cU cZ
+ fi
+}
+
+update_encargs
+update_colors
+
+# parse arguments
+while [ "$1" ]; do
+ case "$1" in
+ -o) DIR=$2; shift;;
+ -of) OUTPATTERN=$2; shift;;
+ -cue) CUE=$2; shift;;
+ -cuecharset) CHARSET=$2; shift;;
+ -nask) NASK=1;;
+ -f) FORMAT=$2; update_encargs; shift;;
+ -e) ENCARGS=$2; SKIP_UPDATE_ENCARGS=1; shift;;
+ -eh) ENCHELP=1;;
+ -enca) ENCA_ARGS=$2; shift;;
+ -c) NOPIC=0; PIC=$2; shift;;
+ -nc) NOPIC=1;;
+ -C) COPYMASKS=$2; COPYFILES=1; shift;;
+ -nC) COPYFILES=0;;
+ -cs) PIC_SIZE=$2; shift;;
+ -d) NOSUBDIRS=0;;
+ -nd) NOSUBDIRS=1;;
+ -p) DRY=1;;
+ -D) REMOVE=1;;
+ -nD) REMOVE=0;;
+ -F) FORCE=1;;
+ -colors) NOCOLORS=0; update_colors;;
+ -nocolors) NOCOLORS=1; update_colors;;
+ -g) REPLAY_GAIN=1;;
+ -ng) REPLAY_GAIN=0;;
+ -s) SAVE=1;;
+ -h|--help|-help) eval "$msg \"${HELP}\""; exit 0;;
+ -v|--version)
+ $msg "split2${FORMAT} version: ${VERSION}\n\n";
+ shntool -v 2>&1 | grep '^shntool';
+ flac --version 2>/dev/null;
+ wavpack --help 2>&1 | grep 'Version';
+ mac 2>&1 | grep '(v ';
+ faac -h 2>&1 | grep '^FAAC';
+ oggenc --version 2>/dev/null;
+ lame --version | grep '^LAME';
+ exit 0;;
+ -*) eval "$msg \"${HELP}\""; emsg "\nUnknown option $1\n"; exit 1;;
+ *)
+ if [ -n "${INPATH}" ]; then
+ eval "$msg \"${HELP}\""
+ emsg "\nUnknown option $1\n"
+ exit 1
+ elif [ ! -r "$1" ]; then
+ emsg "Unable to read $1\n"
+ exit 2
+ else
+ INPATH="$1"
+ fi;;
+ esac
+ shift
+done
+
+eval "export ENCARGS_${FORMAT}=\"${ENCARGS}\""
+
+# save configuration if needed
+if [ ${SAVE} -eq 1 ]; then
+ echo "DIR=\"${DIR}\"" > "${CONFIG}"
+ echo "OUTPATTERN=\"${OUTPATTERN}\"" >> "${CONFIG}"
+ echo "COPYMASKS=\"${COPYMASKS}\"" >> "${CONFIG}"
+ echo "COPYFILES=${COPYFILES}" >> "${CONFIG}"
+ echo "NOSUBDIRS=${NOSUBDIRS}" >> "${CONFIG}"
+ echo "NOPIC=${NOPIC}" >> "${CONFIG}"
+ echo "REMOVE=${REMOVE}" >> "${CONFIG}"
+ echo "PIC_SIZE=${PIC_SIZE}" >> "${CONFIG}"
+ echo "NOCOLORS=${NOCOLORS}" >> "${CONFIG}"
+ echo "REPLAY_GAIN=${REPLAY_GAIN}" >> "${CONFIG}"
+ echo "ENCARGS_flac=\"${ENCARGS_flac}\"" >> "${CONFIG}"
+ echo "ENCARGS_m4a=\"${ENCARGS_m4a}\"" >> "${CONFIG}"
+ echo "ENCARGS_mp3=\"${ENCARGS_mp3}\"" >> "${CONFIG}"
+ echo "ENCARGS_ogg=\"${ENCARGS_ogg}\"" >> "${CONFIG}"
+ echo "ENCARGS_wav=\"${ENCARGS_wav}\"" >> "${CONFIG}"
+ echo "ENCA_ARGS=\"${ENCA_ARGS}\"" >> "${CONFIG}"
+ $msg "${cP}Configuration saved$cZ\n"
+fi
+
+# use flake if possible
+command -v flake >/dev/null && FLAC_ENCODER="flake" || FLAC_ENCODER="flac"
+
+METAFLAC="metaflac --no-utf8-convert"
+VORBISCOMMENT="vorbiscomment -R -a"
+command -v mid3v2 >/dev/null && ID3TAG="mid3v2" || ID3TAG="id3tag -2"
+MP4TAGS="mp4tags"
+GETTAG="cueprint -n 1 -t"
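+# VALIDATE strips characters that tend to break file names (keeps alphanumerics,
+# whitespace and the set &_#,.'"()!-)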
+VALIDATE="sed s/[^][[:space:][:alnum:]&_#,.'\"\(\)!-]//g"
+
+# check & print output format
+msg_format="${cG}Output format :$cZ"
+case ${FORMAT} in
+ flac) $msg "$msg_format FLAC [using ${FLAC_ENCODER} tool]"; enc_help="${FLAC_ENCODER} -h";;
+ m4a) $msg "$msg_format M4A"; enc_help="faac --help";;
+ mp3) $msg "$msg_format MP3"; enc_help="lame --help";;
+ ogg) $msg "$msg_format OGG VORBIS"; enc_help="oggenc --help";;
+ wav) $msg "$msg_format WAVE"; enc_help="echo Sorry, no arguments available for this encoder";;
+ *) emsg "Unknown output format \"${FORMAT}\"\n"; exit 1;;
+esac
+
+$msg " (${ENCARGS})\n"
+
+if [ ${ENCHELP} -eq 1 ]; then
+ ${enc_help}
+ exit 0
+fi
+
+$msg "${cG}Output dir :$cZ ${DIR:?Output directory was not set}\n"
+
+# replaces a tag name with the value of the tag. $1=pattern $2=tag_name $3=tag_value
+update_pattern_aux () {
+ tag_name="$2"
+ tag_value="$3"
+ expr_match="@${tag_name}"
+ expr_match_opt="[{]\([^}{]*\)${expr_match}\([^}]*\)[}]"
+
+ echo "$1" | { [ "${tag_value}" ] \
+ && sed "s/${expr_match_opt}/\1${tag_value}\2/g;s/${expr_match}/${tag_value}/g" \
+ || sed "s/${expr_match_opt}//g;s/${expr_match}//g"; }
+}
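+# e.g. update_pattern_aux "@artist/{@year - }@album" "year" "2009" gives
+# "@artist/2009 - @album", while an empty value drops the whole braced
+# group: "@artist/@album"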
+
+# replaces a tag name with the value of the tag. $1=pattern $2=tag_name $3=tag_value
+update_pattern () {
+ # replace '/' with '\' and '&' with '\&' for proper sed call
+ tag_name=$(echo "$2" | sed 's,/,\\\\,g;s,&,\\&,g')
+ tag_value=$(echo "$3" | sed 's,/,\\\\,g;s,&,\\&,g')
+
+ v=$(update_pattern_aux "$1" "${tag_name}" "${tag_value}")
+    update_pattern_aux "$v" "_${tag_name}" "$(echo "${tag_value}" | sed "s/ /_/g")"
+}
+
+# splits a file
+split_file () {
+ TMPCUE="${HOME}/.split2flac_XXXXX.cue"
+ FILE="$1"
+
+ if [ ! -r "${FILE}" ]; then
+ emsg "Can not read the file\n"
+ return 1
+ fi
+
+ # search for a cue sheet if not specified
+ if [ -z "${CUE}" ]; then
+ CUE="${FILE}.cue"
+ if [ ! -r "${CUE}" ]; then
+ CUE="${FILE%.*}.cue"
+ if [ ! -r "${CUE}" ]; then
+ # try to extract internal one
+ CUESHEET=$(${METAFLAC} --show-tag=CUESHEET "${FILE}" 2>/dev/null | sed 's/^cuesheet=//;s/^CUESHEET=//')
+
+ # try WV internal cue sheet
+ [ -z "${CUESHEET}" ] && CUESHEET=$(wvunpack -q -c "${FILE}" 2>/dev/null)
+
+ # try APE internal cue sheet (omfg!)
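+            # (APEv2 footer layout: the last 32 bytes of a tagged file start
+            # with the literal "APETAGEX", and bytes 13-16 hold the tag size,
+            # so tail/cut can locate and slice the tag without an APE tool)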
+ if [ -z "${CUESHEET}" ]; then
+ APETAGEX=$(tail -c 32 "$1" | cut -b 1-8 2>/dev/null)
+ if [ "${APETAGEX}" = "APETAGEX" ]; then
+ LENGTH=$(tail -c 32 "$1" | cut -b 13-16 | od -t u4 | awk '{printf $2}') 2>/dev/null
+ tail -c ${LENGTH} "$1" | grep -a CUESHEET >/dev/null 2>&1
+ if [ $? -eq 0 ]; then
+ CUESHEET=$(tail -c ${LENGTH} "$1" | sed 's/.*CUESHEET.//g' 2>/dev/null)
+ [ $? -ne 0 ] && CUESHEET=""
+ fi
+ fi
+ fi
+
+ if [ -n "${CUESHEET}" ]; then
+ $msg "${cP}Found internal cue sheet$cZ\n"
+ TMPCUE=$(mktemp "${TMPCUE}")
+ CUE="${TMPCUE}"
+                echo "${CUESHEET}" > "${CUE}"
+
+                if [ $? -ne 0 ]; then
+                    emsg "Unable to save internal cue sheet\n"
+                    return 1
+                fi
+                # reset the template for the later mktemp call
+                TMPCUE="${HOME}/.split2flac_XXXXX.cue"
+ else
+ unset CUE
+ fi
+ fi
+ fi
+ fi
+
+ # print cue sheet filename
+ if [ -z "${CUE}" ]; then
+ emsg "No cue sheet\n"
+ return 1
+ fi
+
+ # cue sheet charset
+ [ -z "${CHARSET}" ] && CHARSET="utf-8" || $msg "${cG}Cue charset : $cP${CHARSET} -> utf-8$cZ\n"
+
+ CUESHEET=$(iconv -f "${CHARSET}" -t utf-8 "${CUE}" 2>/dev/null)
+
+ # try to guess the charset using enca
+ if [ $? -ne 0 ]; then
+ CUESHEET=$(enconv ${ENCA_ARGS} -x utf8 < "${CUE}" 2>/dev/null)
+ fi
+
+ if [ $? -ne 0 ]; then
+ [ "${CHARSET}" = "utf-8" ] \
+ && emsg "Cue sheet is not utf-8\n" \
+ || emsg "Unable to convert cue sheet from ${CHARSET} to utf-8\n"
+
+ if [ ${NASK} -eq 0 ]; then
+ while [ 1 ]; do
+ echo -n "Please enter the charset (or just press ENTER to ignore) > "
+ read CHARSET
+
+ [ -z "${CHARSET}" ] && break
+ $msg "${cG}Converted cue sheet:$cZ\n"
+ iconv -f "${CHARSET}" -t utf-8 "${CUE}" || continue
+
+ echo -n "Is this right? [Y/n] > "
+ read YEP
+ [ -z "${YEP}" -o "${YEP}" = "y" -o "${YEP}" = "Y" ] && break
+ done
+
+ CUESHEET=$(iconv -f "${CHARSET}" -t utf-8 "${CUE}" 2>/dev/null)
+ fi
+ fi
+
+ # save converted cue sheet
+ TMPCUE=$(mktemp "${TMPCUE}")
+ CUE="${TMPCUE}"
+ echo "${CUESHEET}" > "${CUE}"
+
+ if [ $? -ne 0 ]; then
+ emsg "Unable to save converted cue sheet\n"
+ return 1
+ fi
+
+ SDIR=$(dirname "${FILE}")
+
+ # search for a front cover image
+ if [ ${NOPIC} -eq 1 ]; then
+ unset PIC
+ elif [ -z "${PIC}" ]; then
+ # try common names
+        # glob relative to the album directory, not the current directory
+        for i in "${SDIR}"/*[Cc]over*.jpg "${SDIR}"/*[Ff]older*.jpg "${SDIR}"/*/*[Cc]over*.jpg "${SDIR}"/*/*[Ff]older*.jpg; do
+            if [ -r "$i" ]; then
+                PIC="$i"
+                break
+            fi
+        done
+
+ # try to extract internal one
+ if [ -z "${PIC}" ]; then
+ ${METAFLAC} --export-picture-to="${TMPPIC}" "${FILE}" 2>/dev/null
+ if [ $? -ne 0 ]; then
+ unset PIC
+ else
+ PIC="${TMPPIC}"
+ fi
+ fi
+ fi
+
+ $msg "${cG}Cue sheet :$cZ ${CUE}\n"
+ $msg "${cG}Cover image :$cZ ${PIC:-not set}\n"
+
+ # file removal warning
+ if [ ${REMOVE} -eq 1 ]; then
+ msg_removal="\n${cR}Also remove original"
+ [ ${FORCE} -eq 1 ] \
+ && $msg "$msg_removal (WITHOUT ASKING)$cZ\n" \
+ || $msg "$msg_removal if user says 'y'$cZ\n"
+ fi
+
+ # files to copy over
+ if [ ${COPYFILES} -eq 1 -a -n "${COPYMASKS}" ]; then
+ $msg "${cG}Copy over :$cZ ${COPYMASKS}\n"
+ fi
+
+ # get common tags
+ TAG_ARTIST=$(${GETTAG} %P "${CUE}" 2>/dev/null)
+ TAG_ALBUM=$(${GETTAG} %T "${CUE}" 2>/dev/null)
+ TRACKS_NUM=$(${GETTAG} %N "${CUE}" 2>/dev/null)
+
+ # some cue sheets may have non-audio tracks
+ # we can check the difference between what cuebreakpoints and cueprint gives us
+ BREAKPOINTS_NUM=$(($(cuebreakpoints "${CUE}" 2>/dev/null | wc -l) + 1))
+
+ # too bad, we can't fix that in a _right_ way
+ if [ ${BREAKPOINTS_NUM} -lt ${TRACKS_NUM} ]; then
+ emsg "'cueprint' tool reported ${TRACKS_NUM} tracks, "
+ emsg "but there seem to be only ${BREAKPOINTS_NUM} audio ones\n"
+        emsg "Sorry, the 'cueprint' tool offers no helpful option for this problem.\n"
+        emsg "You probably need to remove the non-audio tracks from the cue sheet (\"${CUE}\") by hand.\n"
+ return 1
+ fi
+
+ if [ -z "${TRACKS_NUM}" ]; then
+ emsg "Failed to get number of tracks from CUE sheet.\n"
+ emsg "There may be an error in the sheet.\n"
+ emsg "Running ${GETTAG} %N \"${CUE}\" produces this:\n"
+ ${GETTAG} %N "${CUE}"
+ return 1
+ fi
+
+ TAG_GENRE=$(grep 'REM[ \t]\+GENRE[ \t]\+' "${CUE}" | head -1 | sed 's/REM[ \t]\+GENRE[ \t]\+//;s/^"\(.*\)"$/\1/')
+
+ YEAR=$(awk '{ if (/REM[ \t]+DATE/) { printf "%i", $3; exit } }' < "${CUE}")
+ YEAR=$(echo ${YEAR} | tr -d -c '[:digit:]')
+
+ unset TAG_DATE
+
+ if [ -n "${YEAR}" ]; then
+ [ ${YEAR} -ne 0 ] && TAG_DATE="${YEAR}"
+ fi
+
+ $msg "\n${cG}Artist :$cZ ${TAG_ARTIST}\n"
+ $msg "${cG}Album :$cZ ${TAG_ALBUM}\n"
+ [ "${TAG_GENRE}" ] && $msg "${cG}Genre :$cZ ${TAG_GENRE}\n"
+ [ "${TAG_DATE}" ] && $msg "${cG}Year :$cZ ${TAG_DATE}\n"
+ $msg "${cG}Tracks :$cZ ${TRACKS_NUM}\n\n"
+
+ # those tags won't change, so update the pattern now
+ DIR_ARTIST=$(echo "${TAG_ARTIST}" | ${VALIDATE})
+ DIR_ALBUM=$(echo "${TAG_ALBUM}" | ${VALIDATE})
+ PATTERN=$(update_pattern "${OUTPATTERN}" "artist" "${DIR_ARTIST}")
+ PATTERN=$(update_pattern "${PATTERN}" "album" "${DIR_ALBUM}")
+ PATTERN=$(update_pattern "${PATTERN}" "genre" "${TAG_GENRE}")
+ PATTERN=$(update_pattern "${PATTERN}" "year" "${TAG_DATE}")
+ PATTERN=$(update_pattern "${PATTERN}" "ext" "${FORMAT}")
+
+ # construct output directory name
+ OUT="${DIR}"
+
+ if [ ${NOSUBDIRS} -eq 0 ]; then
+ # add path from the pattern
+ path=$(dirname "${PATTERN}")
+ [ "${path}" != "${PATTERN}" ] && OUT="${OUT}/${path}"
+ fi
+
+    # shnsplit breaks on a double slash in the path
+ OUT=$(echo "${OUT}" | sed s,/[/]*,/,g)
+
+ # remove path from the pattern
+ PATTERN=$(basename "${PATTERN}")
+
+ $msg "${cP}Saving tracks to $cZ\"${OUT}\"\n"
+
+ # split to tracks
+ if [ ${DRY} -ne 1 ]; then
+ # remove if empty and create output dir
+ if [ ${NOSUBDIRS} -eq 0 ]; then
+ rmdir "${OUT}" 2>/dev/null
+ mkdir -p "${OUT}"
+ [ $? -ne 0 ] && { emsg "Failed to create output directory ${OUT} (already split?)\n"; return 1; }
+ fi
+
+ case ${FORMAT} in
+ flac) ENC="flac ${FLAC_ENCODER} ${ENCARGS} - -o %f"; RG="metaflac --add-replay-gain";;
+ m4a) ENC="cust ext=m4a faac ${ENCARGS} -o %f -"; RG="aacgain";;
+ mp3) ENC="cust ext=mp3 lame ${ENCARGS} - %f"; RG="mp3gain";;
+ ogg) ENC="cust ext=ogg oggenc ${ENCARGS} - -o %f"; RG="vorbisgain -a";;
+ wav) ENC="wav ${ENCARGS}"; REPLAY_GAIN=0;;
+ *) emsg "Unknown output format ${FORMAT}\n"; exit 1;;
+ esac
+
+ # split to tracks
+ # sed expression is a fix for "shnsplit: error: m:ss.ff format can only be used with CD-quality files"
+ cuebreakpoints "${CUE}" 2>/dev/null | \
+ sed 's/$/0/' | \
+ shnsplit -O never -o "${ENC}" -d "${OUT}" -t "%n" "${FILE}"
+ if [ $? -ne 0 ]; then
+ emsg "Failed to split\n"
+ return 1
+ fi
+
+ # prepare cover image
+ if [ -n "${PIC}" ]; then
+ convert "${PIC}" -resize "${PIC_SIZE}" "${TMPPIC}"
+ if [ $? -eq 0 ]; then
+ PIC="${TMPPIC}"
+ else
+ $msg "${cR}Failed to convert cover image$cZ\n"
+ unset PIC
+ fi
+ fi
+ fi
+
+ # set tags and rename
+ $msg "\n${cP}Setting tags$cZ\n"
+
+ i=1
+ while [ $i -le ${TRACKS_NUM} ]; do
+ TAG_TITLE=$(cueprint -n $i -t %t "${CUE}" 2>/dev/null)
+ FILE_TRACK="$(printf %02i $i)"
+ FILE_TITLE=$(echo "${TAG_TITLE}" | ${VALIDATE})
+ f="${OUT}/${FILE_TRACK}.${FORMAT}"
+
+ TAG_PERFORMER=$(cueprint -n $i -t %p "${CUE}" 2>/dev/null)
+
+ if [ -n "${TAG_PERFORMER}" -a "${TAG_PERFORMER}" != "${TAG_ARTIST}" ]; then
+ $msg "$i: $cG${TAG_PERFORMER} - ${TAG_TITLE}$cZ\n"
+ else
+ TAG_PERFORMER="${TAG_ARTIST}"
+ $msg "$i: $cG${TAG_TITLE}$cZ\n"
+ fi
+
+ FINAL=$(update_pattern "${OUT}/${PATTERN}" "title" "${FILE_TITLE}")
+ FINAL=$(update_pattern "${FINAL}" "performer" "${TAG_PERFORMER}")
+ FINAL=$(update_pattern "${FINAL}" "track" "${FILE_TRACK}")
+
+ if [ ${DRY} -ne 1 -a "$f" != "${FINAL}" ]; then
+ mv "$f" "${FINAL}"
+ if [ $? -ne 0 ]; then
+ emsg "Failed to rename track file\n"
+ return 1
+ fi
+ fi
+
+ if [ ${DRY} -ne 1 ]; then
+ case ${FORMAT} in
+ flac)
+ ${METAFLAC} --remove-all-tags \
+ --set-tag="ARTIST=${TAG_PERFORMER}" \
+ --set-tag="ALBUM=${TAG_ALBUM}" \
+ --set-tag="TITLE=${TAG_TITLE}" \
+ --set-tag="TRACKNUMBER=$i" \
+ --set-tag="TRACKTOTAL=${TRACKS_NUM}" \
+ "${FINAL}" >/dev/null
+ RES=$?
+
+ [ "${TAG_GENRE}" ] && { ${METAFLAC} --set-tag="GENRE=${TAG_GENRE}" "${FINAL}" >/dev/null; RES=$RES$?; }
+ [ "${TAG_DATE}" ] && { ${METAFLAC} --set-tag="DATE=${TAG_DATE}" "${FINAL}" >/dev/null; RES=$RES$?; }
+ [ "${PIC}" ] && { ${METAFLAC} --import-picture-from="${PIC}" "${FINAL}" >/dev/null; RES=$RES$?; }
+ ;;
+
+ mp3)
+ ${ID3TAG} --artist="${TAG_PERFORMER}" \
+ --album="${TAG_ALBUM}" \
+ --song="${TAG_TITLE}" \
+ --track="$i" \
+ "${FINAL}" >/dev/null
+ RES=$?
+
+ [ "${TAG_GENRE}" ] && { ${ID3TAG} --genre="${TAG_GENRE}" "${FINAL}" >/dev/null; RES=$RES$?; }
+ [ "${TAG_DATE}" ] && { ${ID3TAG} --year="${TAG_DATE}" "${FINAL}" >/dev/null; RES=$RES$?; }
+ ;;
+
+ ogg)
+ ${VORBISCOMMENT} "${FINAL}" \
+ -t "ARTIST=${TAG_PERFORMER}" \
+ -t "ALBUM=${TAG_ALBUM}" \
+ -t "TITLE=${TAG_TITLE}" \
+ -t "TRACKNUMBER=$i" \
+ -t "TRACKTOTAL=${TRACKS_NUM}" >/dev/null
+ RES=$?
+
+ [ "${TAG_GENRE}" ] && { ${VORBISCOMMENT} "${FINAL}" -t "GENRE=${TAG_GENRE}" >/dev/null; RES=$RES$?; }
+ [ "${TAG_DATE}" ] && { ${VORBISCOMMENT} "${FINAL}" -t "DATE=${TAG_DATE}" >/dev/null; RES=$RES$?; }
+ ;;
+
+ m4a)
+ ${MP4TAGS} "${FINAL}" \
+ -a "${TAG_PERFORMER}" \
+ -A "${TAG_ALBUM}" \
+ -s "${TAG_TITLE}" \
+ -t "$i" \
+ -T "${TRACKS_NUM}" >/dev/null
+ RES=$?
+
+ [ "${TAG_GENRE}" ] && { ${MP4TAGS} "${FINAL}" -g "${TAG_GENRE}" >/dev/null; RES=$RES$?; }
+ [ "${TAG_DATE}" ] && { ${MP4TAGS} "${FINAL}" -y "${TAG_DATE}" >/dev/null; RES=$RES$?; }
+ [ "${PIC}" ] && { ${MP4TAGS} "${FINAL}" -P "${PIC}" >/dev/null; RES=$RES$?; }
+ ;;
+
+ wav)
+ RES=0
+ ;;
+
+ *)
+ emsg "Unknown output format ${FORMAT}\n"
+ return 1
+ ;;
+ esac
+
+ if [ ${RES} -ne 0 ]; then
+ emsg "Failed to set tags for track\n"
+ return 1
+ fi
+ fi
+
+ $msg " -> ${cP}${FINAL}$cZ\n"
+
+ i=$(($i + 1))
+ done
+
+ # adjust gain
+ if [ ${REPLAY_GAIN} -ne 0 ]; then
+ $msg "\n${cP}Adjusting gain$cZ\n"
+
+ if [ ${DRY} -ne 1 ]; then
+ ${RG} "${OUT}/"*.${FORMAT} >/dev/null
+
+ if [ $? -ne 0 ]; then
+ emsg "Failed to adjust gain for track\n"
+ return 1
+ fi
+ fi
+ fi
+
+ # copy files
+ if [ ${COPYFILES} -eq 1 -a "${COPYMASKS}" ]; then
+ old=`pwd`
+ cd "${SDIR}"
+ $msg "\n${cG}Copying files:$cZ\n"
+ eval "for i in ${COPYMASKS}; do \
+ test -r \"\$i\" && \
+ echo \" +> \$i\" 2>/dev/null; done"
+ cd "${old}"
+ if [ ${DRY} -ne 1 ]; then
+ eval "for i in ${COPYMASKS}; do \
+                    test -r \"${SDIR}/\$i\" && \
+ cp -r \"${SDIR}/\$i\" \"\${OUT}/\"; done"
+ fi
+ fi
+
+ rm -f "${TMPPIC}"
+ rm -f "${TMPCUE}"
+
+ if [ ${DRY} -ne 1 -a ${REMOVE} -eq 1 ]; then
+ YEP="n"
+
+ if [ ${FORCE} -ne 1 ]; then
+ echo -n "Are you sure you want to delete original? [y/N] > "
+ read YEP
+ fi
+
+ [ "${YEP}" = "y" -o "${YEP}" = "Y" -o ${FORCE} -eq 1 ] && rm -f "${FILE}"
+ fi
+
+ return 0
+}
+
+# searches for files in a directory and splits them
+split_collection () {
+ rm -f "${FAILED}"
+ NUM_FAILED=0
+ OLDIFS=${IFS}
+ OLDCHARSET="${CHARSET}"
+ # set IFS to newline. we do not use 'read' here because we may want to ask user for input
+ IFS="
+"
+
+ for FILE in `find "$1" -iname '*.flac' -o -iname '*.ape' -o -iname '*.tta' -o -iname '*.wv' -o -iname '*.wav'`; do
+ IFS=${OLDIFS}
+ CHARSET=${OLDCHARSET}
+ $msg "$cG>> $cC\"${FILE}\"$cZ\n"
+ unset PIC CUE
+ split_file "${FILE}"
+
+ if [ ! $? -eq 0 ]; then
+ emsg "Failed to split \"${FILE}\"\n"
+ echo "${FILE}" >> "${FAILED}"
+ NUM_FAILED=$((${NUM_FAILED} + 1))
+ fi
+
+ echo
+ done
+
+ if [ ${NUM_FAILED} -ne 0 ]; then
+ emsg "${NUM_FAILED} file(s) failed to split (already split?):\n"
+ $msg "${cR}\n"
+ sort "${FAILED}" -o "${FAILED}"
+ cat "${FAILED}"
+ emsg "\nThese files are also listed in ${FAILED}.\n"
+ return 1
+ fi
+
+ return 0
+}
+
+if [ -d "${INPATH}" ]; then
+ if [ ! -x "${INPATH}" ]; then
+ emsg "Directory \"${INPATH}\" is not accessible\n"
+ exit 2
+ fi
+ $msg "${cG}Input dir :$cZ ${INPATH}$cZ\n\n"
+ split_collection "${INPATH}"
+elif [ -n "${INPATH}" ]; then
+ split_file "${INPATH}"
+else
+ emsg "No input filename given. Use -h for help.\n"
+ exit 1
+fi
+
+# exit code of split_collection or split_file
+STATUS=$?
+
+$msg "\n${cP}Finished$cZ\n"
+
+[ ${STATUS} -ne 0 ] && exit 3 || exit 0
+
+### Local Variables: ***
+### mode:sh ***
+### tab-width:4 ***
+### End: ***
diff --git a/audio/wma2mp3.sh b/audio/wma2mp3.sh
new file mode 100755
index 0000000..db51e56
--- /dev/null
+++ b/audio/wma2mp3.sh
@@ -0,0 +1,20 @@
+#!/bin/sh
+# convert *.wma to *.mp3
+
+current_directory=$( pwd )
+
+#remove spaces
+for i in *.wma; do mv "$i" `echo $i | tr ' ' '_'`; done
+
+#remove uppercase
+for i in *.[Ww][Mm][Aa]; do mv "$i" `echo $i | tr '[A-Z]' '[a-z]'`; done
+
+#Rip with Mplayer / encode with LAME
+for i in *.wma ; do mplayer -vo null -vc dummy -af resample=44100 -ao pcm \
+    -waveheader "$i" && lame -m s audiodump.wav -o "$i"; done
+
+#convert file names
+for i in *.wma; do mv "$i" "`basename "$i" .wma`.mp3"; done
+
+rm audiodump.wav
+
diff --git a/bin/gen_points.py b/bin/gen_points.py
new file mode 100755
index 0000000..57733dc
--- /dev/null
+++ b/bin/gen_points.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/06/19
+
+"""
+Generate the required number of random points within the required region.
+"""
+
+__version__ = "0.1.0"
+__date__ = "2015/06/19"
+DEBUG = True
+
+import sys
+import argparse
+import random
+import time
+import re
+
+from rand.sphere import sphere_point
+from region.region import Region
+
+random.seed(time.time())
+
+
+def parse_region(regstring):
+ reg_par = re.sub(r"[(),]", " ", regstring).split()
+ regtype = reg_par[0].lower()
+ if regtype == "box":
+ xc = float(reg_par[1])
+ yc = float(reg_par[2])
+ width = parse_reg_value(reg_par[3])
+ height = parse_reg_value(reg_par[4])
+ rotation = float(reg_par[5])
+ reg = Region(regtype, xc=xc, yc=yc,
+ width=width, height=height, rotation=rotation)
+ else:
+ raise ValueError("region type '%s' currently not implemented" % regtype)
+ return reg
+
+
+def parse_reg_value(valstring):
+ if valstring[-1] == '"':
+ # arcsec -> deg
+ value = float(valstring.split('"')[0]) / 60.0 / 60.0
+ elif valstring[-1] == "'":
+ # arcmin -> deg
+ value = float(valstring.split("'")[0]) / 60.0
+ else:
+ value = float(valstring)
+ return value
+
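+# Illustrative conversions: parse_reg_value('90"') -> 0.025 (deg),
+# parse_reg_value("1.5'") -> 0.025 (deg), parse_reg_value("0.025") -> 0.025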
+
+def main():
+ parser = argparse.ArgumentParser(
+ description="Generate random point within the given region.")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("-n", "--number", dest="number",
+ type=int, default=1,
+ help="number of points to be generated")
+ parser.add_argument("-r", "--region", dest="region", required=True,
+ help="DS9 region")
+ args = parser.parse_args()
+
+ reg = parse_region(args.region)
+ if DEBUG:
+ print("DEBUG: region: ", reg.dump(), file=sys.stderr)
+
+ points = []
+ while len(points) < args.number:
+ p = sphere_point(unit="deg")
+ if reg.is_inside(p):
+ points.append(p)
+ print("%s %s" % p)
+
+
+if __name__ == "__main__":
+ main()
+
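+# Example invocation (region values are hypothetical):
+#   ./gen_points.py -n 100 -r 'box(123.456,-12.345,10",10",0)'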
diff --git a/bin/img2list.py b/bin/img2list.py
new file mode 100644
index 0000000..48d0de4
--- /dev/null
+++ b/bin/img2list.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/06/23
+#
+
+
+import numpy as np
+from astropy.io import fits
+
+
+def img2list(imgdata, mask=None):
+ """
+ Convert a image matrix to list of point coordinates.
+ The input image matrix is taken as an integer matrix.
+ If one pixel has value n (>1), then it is repeated n times.
+ """
+ img = imgdata.astype(int)
+ points = []
+ ii, jj = np.nonzero(img >= 1)
+ while len(ii) > 0:
+ for i, j in zip(ii, jj):
+ points.append([i, j])
+ img[ii, jj] -= 1
+ ii, jj = np.nonzero(img >= 1)
+ return np.array(points)
+
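+# Example (illustrative):
+#   >>> img2list(np.array([[0, 2], [1, 0]]))
+#   array([[0, 1], [1, 0], [0, 1]])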
diff --git a/cli/colors.sh b/cli/colors.sh
new file mode 100755
index 0000000..a28c261
--- /dev/null
+++ b/cli/colors.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# https://gist.github.com/esundahl/1651086
+
+function color_test {
+ # Daniel Crisman's ANSI color chart script from
+ # The Bash Prompt HOWTO: 6.1. Colours
+ # http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/x329.html
+ #
+ # This function echoes a bunch of color codes to the
+ # terminal to demonstrate what's available. Each
+    # line is the color code of one foreground color,
+ # out of 17 (default + 16 escapes), followed by a
+ # test use of that color on all nine background
+ # colors (default + 8 escapes).
+ #
+
+ T='gYw' # The test text
+
+ echo -e "\n 40m 41m 42m 43m\
+ 44m 45m 46m 47m"
+
+ for FGs in ' m' ' 1m' ' 30m' '1;30m' ' 31m' '1;31m' \
+ ' 32m' '1;32m' ' 33m' '1;33m' ' 34m' '1;34m' \
+ ' 35m' '1;35m' ' 36m' '1;36m' ' 37m' '1;37m';
+ do
+ FG=${FGs// /}
+ echo -en " $FGs \033[$FG $T "
+ for BG in 40m 41m 42m 43m 44m 45m 46m 47m; do
+      echo -en " \033[$FG\033[$BG  $T  \033[0m"
+ done
+ echo;
+ done
+ echo
+}
+
+color_test
+
diff --git a/cli/colortest.bash b/cli/colortest.bash
new file mode 100755
index 0000000..c777b9e
--- /dev/null
+++ b/cli/colortest.bash
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+#
+# ANSI color scheme script featuring Space Invaders
+#
+# Original: http://crunchbang.org/forums/viewtopic.php?pid=126921%23p126921#p126921
+# Modified by lolilolicon
+#
+
+f=3 b=4
+for j in f b; do
+ for i in {0..7}; do
+ printf -v $j$i %b "\e[${!j}${i}m"
+ done
+done
+bld=$'\e[1m'
+rst=$'\e[0m'
+
+cat << EOF
+
+ $f1 ▀▄ ▄▀ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4 ▀▄ ▄▀ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
+ $f1 ▄█▀███▀█▄ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4 ▄█▀███▀█▄ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
+ $f1█▀███████▀█ $f2▀▀███▀▀███▀▀ $f3▀█▀██▀█▀ $f4█▀███████▀█ $f5▀▀███▀▀███▀▀ $f6▀█▀██▀█▀$rst
+ $f1▀ ▀▄▄ ▄▄▀ ▀ $f2 ▀█▄ ▀▀ ▄█▀ $f3▀▄ ▄▀ $f4▀ ▀▄▄ ▄▄▀ ▀ $f5 ▀█▄ ▀▀ ▄█▀ $f6▀▄ ▄▀$rst
+
+ $bld$f1▄ ▀▄ ▄▀ ▄ $f2 ▄▄▄████▄▄▄ $f3 ▄██▄ $f4▄ ▀▄ ▄▀ ▄ $f5 ▄▄▄████▄▄▄ $f6 ▄██▄ $rst
+ $bld$f1█▄█▀███▀█▄█ $f2███▀▀██▀▀███ $f3▄█▀██▀█▄ $f4█▄█▀███▀█▄█ $f5███▀▀██▀▀███ $f6▄█▀██▀█▄$rst
+ $bld$f1▀█████████▀ $f2▀▀▀██▀▀██▀▀▀ $f3▀▀█▀▀█▀▀ $f4▀█████████▀ $f5▀▀▀██▀▀██▀▀▀ $f6▀▀█▀▀█▀▀$rst
+ $bld$f1 ▄▀ ▀▄ $f2▄▄▀▀ ▀▀ ▀▀▄▄ $f3▄▀▄▀▀▄▀▄ $f4 ▄▀ ▀▄ $f5▄▄▀▀ ▀▀ ▀▀▄▄ $f6▄▀▄▀▀▄▀▄$rst
+
+
+ $f7▌$rst
+
+ $f7▌$rst
+
+ $f7 ▄█▄ $rst
+ $f7▄█████████▄$rst
+ $f7▀▀▀▀▀▀▀▀▀▀▀$rst
+
+EOF
diff --git a/cli/colortest.lua b/cli/colortest.lua
new file mode 100755
index 0000000..8f8c95f
--- /dev/null
+++ b/cli/colortest.lua
@@ -0,0 +1,22 @@
+#!/usr/bin/env lua
+
+function cl(e)
+ return string.format('\27[%sm', e)
+end
+
+function print_fg(bg, pre)
+ for fg = 30,37 do
+ fg = pre..fg
+ io.write(cl(bg), cl(fg), string.format(' %6s ', fg), cl(0))
+ end
+end
+
+for bg = 40,47 do
+ io.write(cl(0), ' ', bg, ' ')
+ print_fg(bg, ' ')
+ io.write('\n ')
+ print_fg(bg, '1;')
+ io.write('\n\n')
+end
+
+-- Andres P
diff --git a/cli/colortest.pl b/cli/colortest.pl
new file mode 100755
index 0000000..767789e
--- /dev/null
+++ b/cli/colortest.pl
@@ -0,0 +1,365 @@
+#!/usr/bin/env perl
+
+# by entheon, do whatever the hell you want with this file
+
+print "\n";
+print "*****************************\n";
+print "* XTERM 256Color Test Chart *\n";
+print "*****************************\n";
+print "* 16 = black\n";
+print "* 255 = white\n";
+print "*\n";
+print "* Usage:\n";
+print "* colortest.pl -w\n";
+print "* wide display\n";
+print "*\n";
+print "* colortest.pl -w -r\n";
+print "* wide display reversed\n";
+print "*\n";
+print "* colortest.pl -w -s\n";
+print "* extra spaces padding\n";
+print "*\n";
+print "* colortest.pl -w -r -s\n";
+print "* available combination\n";
+print "*\n";
+print "**************************\n";
+
+if( $ARGV[0] eq "-w" || $ARGV[1] eq "-w" || $ARGV[2] eq "-w" ) {
+ push(@arr, [( " 16: 00/00/00", " 17: 00/00/5f", " 18: 00/00/87", " 19: 00/00/af", " 20: 00/00/d7", " 21: 00/00/ff")] );
+ push(@arr, [( " 22: 00/5f/00", " 23: 00/5f/5f", " 24: 00/5f/87", " 25: 00/5f/af", " 26: 00/5f/d7", " 27: 00/5f/ff")] );
+ push(@arr, [( " 28: 00/87/00", " 29: 00/87/5f", " 30: 00/87/87", " 31: 00/87/af", " 32: 00/87/d7", " 33: 00/87/ff")] );
+ push(@arr, [( " 34: 00/af/00", " 35: 00/af/5f", " 36: 00/af/87", " 37: 00/af/af", " 38: 00/af/d7", " 39: 00/af/ff")] );
+ push(@arr, [( " 40: 00/d7/00", " 41: 00/d7/5f", " 42: 00/d7/87", " 43: 00/d7/af", " 44: 00/d7/d7", " 45: 00/d7/ff")] );
+ push(@arr, [( " 46: 00/ff/00", " 47: 00/ff/5f", " 48: 00/ff/87", " 49: 00/ff/af", " 50: 00/ff/d7", " 51: 00/ff/ff")] );
+ push(@arr, [( " 52: 5f/00/00", " 53: 5f/00/5f", " 54: 5f/00/87", " 55: 5f/00/af", " 56: 5f/00/d7", " 57: 5f/00/ff")] );
+ push(@arr, [( " 58: 5f/5f/00", " 59: 5f/5f/5f", " 60: 5f/5f/87", " 61: 5f/5f/af", " 62: 5f/5f/d7", " 63: 5f/5f/ff")] );
+ push(@arr, [( " 64: 5f/87/00", " 65: 5f/87/5f", " 66: 5f/87/87", " 67: 5f/87/af", " 68: 5f/87/d7", " 69: 5f/87/ff")] );
+ push(@arr, [( " 70: 5f/af/00", " 71: 5f/af/5f", " 72: 5f/af/87", " 73: 5f/af/af", " 74: 5f/af/d7", " 75: 5f/af/ff")] );
+ push(@arr, [( " 76: 5f/d7/00", " 77: 5f/d7/5f", " 78: 5f/d7/87", " 79: 5f/d7/af", " 80: 5f/d7/d7", " 81: 5f/d7/ff")] );
+ push(@arr, [( " 82: 5f/ff/00", " 83: 5f/ff/5f", " 84: 5f/ff/87", " 85: 5f/ff/af", " 86: 5f/ff/d7", " 87: 5f/ff/ff")] );
+ push(@arr, [( " 88: 87/00/00", " 89: 87/00/5f", " 90: 87/00/87", " 91: 87/00/af", " 92: 87/00/d7", " 93: 87/00/ff")] );
+ push(@arr, [( " 94: 87/5f/00", " 95: 87/5f/5f", " 96: 87/5f/87", " 97: 87/5f/af", " 98: 87/5f/d7", " 99: 87/5f/ff")] );
+ push(@arr, [( " 100: 87/87/00", " 101: 87/87/5f", " 102: 87/87/87", " 103: 87/87/af", " 104: 87/87/d7", " 105: 87/87/ff")] );
+ push(@arr, [( " 106: 87/af/00", " 107: 87/af/5f", " 108: 87/af/87", " 109: 87/af/af", " 110: 87/af/d7", " 111: 87/af/ff")] );
+ push(@arr, [( " 112: 87/d7/00", " 113: 87/d7/5f", " 114: 87/d7/87", " 115: 87/d7/af", " 116: 87/d7/d7", " 117: 87/d7/ff")] );
+ push(@arr, [( " 118: 87/ff/00", " 119: 87/ff/5f", " 120: 87/ff/87", " 121: 87/ff/af", " 122: 87/ff/d7", " 123: 87/ff/ff")] );
+ push(@arr, [( " 124: af/00/00", " 125: af/00/5f", " 126: af/00/87", " 127: af/00/af", " 128: af/00/d7", " 129: af/00/ff")] );
+ push(@arr, [( " 130: af/5f/00", " 131: af/5f/5f", " 132: af/5f/87", " 133: af/5f/af", " 134: af/5f/d7", " 135: af/5f/ff")] );
+ push(@arr, [( " 136: af/87/00", " 137: af/87/5f", " 138: af/87/87", " 139: af/87/af", " 140: af/87/d7", " 141: af/87/ff")] );
+ push(@arr, [( " 142: af/af/00", " 143: af/af/5f", " 144: af/af/87", " 145: af/af/af", " 146: af/af/d7", " 147: af/af/ff")] );
+ push(@arr, [( " 148: af/d7/00", " 149: af/d7/5f", " 150: af/d7/87", " 151: af/d7/af", " 152: af/d7/d7", " 153: af/d7/ff")] );
+ push(@arr, [( " 154: af/ff/00", " 155: af/ff/5f", " 156: af/ff/87", " 157: af/ff/af", " 158: af/ff/d7", " 159: af/ff/ff")] );
+ push(@arr, [( " 160: d7/00/00", " 161: d7/00/5f", " 162: d7/00/87", " 163: d7/00/af", " 164: d7/00/d7", " 165: d7/00/ff")] );
+ push(@arr, [( " 166: d7/5f/00", " 167: d7/5f/5f", " 168: d7/5f/87", " 169: d7/5f/af", " 170: d7/5f/d7", " 171: d7/5f/ff")] );
+ push(@arr, [( " 172: d7/87/00", " 173: d7/87/5f", " 174: d7/87/87", " 175: d7/87/af", " 176: d7/87/d7", " 177: d7/87/ff")] );
+ push(@arr, [( " 178: d7/af/00", " 179: d7/af/5f", " 180: d7/af/87", " 181: d7/af/af", " 182: d7/af/d7", " 183: d7/af/ff")] );
+ push(@arr, [( " 184: d7/d7/00", " 185: d7/d7/5f", " 186: d7/d7/87", " 187: d7/d7/af", " 188: d7/d7/d7", " 189: d7/d7/ff")] );
+ push(@arr, [( " 190: d7/ff/00", " 191: d7/ff/5f", " 192: d7/ff/87", " 193: d7/ff/af", " 194: d7/ff/d7", " 195: d7/ff/ff")] );
+ push(@arr, [( " 196: ff/00/00", " 197: ff/00/5f", " 198: ff/00/87", " 199: ff/00/af", " 200: ff/00/d7", " 201: ff/00/ff")] );
+ push(@arr, [( " 202: ff/5f/00", " 203: ff/5f/5f", " 204: ff/5f/87", " 205: ff/5f/af", " 206: ff/5f/d7", " 207: ff/5f/ff")] );
+ push(@arr, [( " 208: ff/87/00", " 209: ff/87/5f", " 210: ff/87/87", " 211: ff/87/af", " 212: ff/87/d7", " 213: ff/87/ff")] );
+ push(@arr, [( " 214: ff/af/00", " 215: ff/af/5f", " 216: ff/af/87", " 217: ff/af/af", " 218: ff/af/d7", " 219: ff/af/ff")] );
+ push(@arr, [( " 220: ff/d7/00", " 221: ff/d7/5f", " 222: ff/d7/87", " 223: ff/d7/af", " 224: ff/d7/d7", " 225: ff/d7/ff")] );
+ push(@arr, [( " 226: ff/ff/00", " 227: ff/ff/5f", " 228: ff/ff/87", " 229: ff/ff/af", " 230: ff/ff/d7", " 231: ff/ff/ff")] );
+ push(@arr, [( " 232: 08/08/08", " 233: 12/12/12", " 234: 1c/1c/1c", " 235: 26/26/26", " 236: 30/30/30", " 237: 3a/3a/3a")] );
+ push(@arr, [( " 238: 44/44/44", " 239: 4e/4e/4e", " 240: 58/58/58", " 241: 62/62/62", " 242: 6c/6c/6c", " 243: 76/76/76")] );
+ push(@arr, [( " 244: 80/80/80", " 245: 8a/8a/8a", " 246: 94/94/94", " 247: 9e/9e/9e", " 248: a8/a8/a8", " 249: b2/b2/b2")] );
+ push(@arr, [( " 250: bc/bc/bc", " 251: c6/c6/c6", " 252: d0/d0/d0", " 253: da/da/da", " 254: e4/e4/e4", " 255: ee/ee/ee")] );
+
+ if( $ARGV[0] eq "-s" || $ARGV[1] eq "-s" || $ARGV[2] eq "-s" ){
+ $padding = " ";
+ }
+	else {
+		$padding = "";
+	}
+
+ # display in reverse order
+ if( $ARGV[0] eq "-r" || $ARGV[1] eq "-r" || $ARGV[2] eq "-r" ){
+ for( $dimone = 0; $dimone < scalar @arr; $dimone++ ) {
+
+ $seed = ($dimone % 6) * -1;
+ for( $dimtwo = 0; $dimtwo < 6; $dimtwo++ ) {
+
+ $movone = $seed;
+ $movtwo = $seed * -1;
+
+ print $arr[$dimone][$dimtwo] . $padding;
+
+ $seed = $seed+1;
+ }
+
+ print "\n";
+ }
+ }
+ else {
+ for( $dimone = 0; $dimone < scalar @arr; $dimone++ ) {
+
+ $seed = ($dimone % 6) * -1;
+ for( $dimtwo = 0; $dimtwo < 6; $dimtwo++ ) {
+
+ $movone = $seed;
+ $movtwo = $seed * -1;
+
+ $newone = $dimone+$movone;
+ $newtwo = $dimtwo+$movtwo;
+
+ if( $newone < scalar @arr ){
+ print $arr[$newone][$newtwo] . $padding;
+ }
+
+ $seed = $seed+1;
+ }
+
+ print "\n";
+ }
+ }
+ print "\n";
+ print "\n";
+
+}
+else {
+ print " 16: 00/00/00\n";
+ print " 17: 00/00/5f\n";
+ print " 18: 00/00/87\n";
+ print " 19: 00/00/af\n";
+ print " 20: 00/00/d7\n";
+ print " 21: 00/00/ff\n";
+ print " 22: 00/5f/00\n";
+ print " 23: 00/5f/5f\n";
+ print " 24: 00/5f/87\n";
+ print " 25: 00/5f/af\n";
+ print " 26: 00/5f/d7\n";
+ print " 27: 00/5f/ff\n";
+ print " 28: 00/87/00\n";
+ print " 29: 00/87/5f\n";
+ print " 30: 00/87/87\n";
+ print " 31: 00/87/af\n";
+ print " 32: 00/87/d7\n";
+ print " 33: 00/87/ff\n";
+ print " 34: 00/af/00\n";
+ print " 35: 00/af/5f\n";
+ print " 36: 00/af/87\n";
+ print " 37: 00/af/af\n";
+ print " 38: 00/af/d7\n";
+ print " 39: 00/af/ff\n";
+ print " 40: 00/d7/00\n";
+ print " 41: 00/d7/5f\n";
+ print " 42: 00/d7/87\n";
+ print " 43: 00/d7/af\n";
+ print " 44: 00/d7/d7\n";
+ print " 45: 00/d7/ff\n";
+ print " 46: 00/ff/00\n";
+ print " 47: 00/ff/5f\n";
+ print " 48: 00/ff/87\n";
+ print " 49: 00/ff/af\n";
+ print " 50: 00/ff/d7\n";
+ print " 51: 00/ff/ff\n";
+ print " 52: 5f/00/00\n";
+ print " 53: 5f/00/5f\n";
+ print " 54: 5f/00/87\n";
+ print " 55: 5f/00/af\n";
+ print " 56: 5f/00/d7\n";
+ print " 57: 5f/00/ff\n";
+ print " 58: 5f/5f/00\n";
+ print " 59: 5f/5f/5f\n";
+ print " 60: 5f/5f/87\n";
+ print " 61: 5f/5f/af\n";
+ print " 62: 5f/5f/d7\n";
+ print " 63: 5f/5f/ff\n";
+ print " 64: 5f/87/00\n";
+ print " 65: 5f/87/5f\n";
+ print " 66: 5f/87/87\n";
+ print " 67: 5f/87/af\n";
+ print " 68: 5f/87/d7\n";
+ print " 69: 5f/87/ff\n";
+ print " 70: 5f/af/00\n";
+ print " 71: 5f/af/5f\n";
+ print " 72: 5f/af/87\n";
+ print " 73: 5f/af/af\n";
+ print " 74: 5f/af/d7\n";
+ print " 75: 5f/af/ff\n";
+ print " 76: 5f/d7/00\n";
+ print " 77: 5f/d7/5f\n";
+ print " 78: 5f/d7/87\n";
+ print " 79: 5f/d7/af\n";
+ print " 80: 5f/d7/d7\n";
+ print " 81: 5f/d7/ff\n";
+ print " 82: 5f/ff/00\n";
+ print " 83: 5f/ff/5f\n";
+ print " 84: 5f/ff/87\n";
+ print " 85: 5f/ff/af\n";
+ print " 86: 5f/ff/d7\n";
+ print " 87: 5f/ff/ff\n";
+ print " 88: 87/00/00\n";
+ print " 89: 87/00/5f\n";
+ print " 90: 87/00/87\n";
+ print " 91: 87/00/af\n";
+ print " 92: 87/00/d7\n";
+ print " 93: 87/00/ff\n";
+ print " 94: 87/5f/00\n";
+ print " 95: 87/5f/5f\n";
+ print " 96: 87/5f/87\n";
+ print " 97: 87/5f/af\n";
+ print " 98: 87/5f/d7\n";
+ print " 99: 87/5f/ff\n";
+	print " 100: 87/87/00\n";
+	print " 101: 87/87/5f\n";
+	print " 102: 87/87/87\n";
+	print " 103: 87/87/af\n";
+	print " 104: 87/87/d7\n";
+	print " 105: 87/87/ff\n";
+	print " 106: 87/af/00\n";
+	print " 107: 87/af/5f\n";
+	print " 108: 87/af/87\n";
+	print " 109: 87/af/af\n";
+	print " 110: 87/af/d7\n";
+	print " 111: 87/af/ff\n";
+	print " 112: 87/d7/00\n";
+	print " 113: 87/d7/5f\n";
+	print " 114: 87/d7/87\n";
+	print " 115: 87/d7/af\n";
+	print " 116: 87/d7/d7\n";
+	print " 117: 87/d7/ff\n";
+	print " 118: 87/ff/00\n";
+	print " 119: 87/ff/5f\n";
+	print " 120: 87/ff/87\n";
+	print " 121: 87/ff/af\n";
+	print " 122: 87/ff/d7\n";
+	print " 123: 87/ff/ff\n";
+	print " 124: af/00/00\n";
+	print " 125: af/00/5f\n";
+	print " 126: af/00/87\n";
+	print " 127: af/00/af\n";
+	print " 128: af/00/d7\n";
+	print " 129: af/00/ff\n";
+	print " 130: af/5f/00\n";
+	print " 131: af/5f/5f\n";
+	print " 132: af/5f/87\n";
+	print " 133: af/5f/af\n";
+	print " 134: af/5f/d7\n";
+	print " 135: af/5f/ff\n";
+	print " 136: af/87/00\n";
+	print " 137: af/87/5f\n";
+	print " 138: af/87/87\n";
+	print " 139: af/87/af\n";
+	print " 140: af/87/d7\n";
+	print " 141: af/87/ff\n";
+	print " 142: af/af/00\n";
+	print " 143: af/af/5f\n";
+	print " 144: af/af/87\n";
+	print " 145: af/af/af\n";
+	print " 146: af/af/d7\n";
+	print " 147: af/af/ff\n";
+	print " 148: af/d7/00\n";
+	print " 149: af/d7/5f\n";
+	print " 150: af/d7/87\n";
+	print " 151: af/d7/af\n";
+	print " 152: af/d7/d7\n";
+	print " 153: af/d7/ff\n";
+	print " 154: af/ff/00\n";
+	print " 155: af/ff/5f\n";
+	print " 156: af/ff/87\n";
+	print " 157: af/ff/af\n";
+	print " 158: af/ff/d7\n";
+	print " 159: af/ff/ff\n";
+	print " 160: d7/00/00\n";
+	print " 161: d7/00/5f\n";
+	print " 162: d7/00/87\n";
+	print " 163: d7/00/af\n";
+	print " 164: d7/00/d7\n";
+	print " 165: d7/00/ff\n";
+	print " 166: d7/5f/00\n";
+	print " 167: d7/5f/5f\n";
+	print " 168: d7/5f/87\n";
+	print " 169: d7/5f/af\n";
+	print " 170: d7/5f/d7\n";
+	print " 171: d7/5f/ff\n";
+	print " 172: d7/87/00\n";
+	print " 173: d7/87/5f\n";
+	print " 174: d7/87/87\n";
+	print " 175: d7/87/af\n";
+	print " 176: d7/87/d7\n";
+	print " 177: d7/87/ff\n";
+	print " 178: d7/af/00\n";
+	print " 179: d7/af/5f\n";
+	print " 180: d7/af/87\n";
+	print " 181: d7/af/af\n";
+	print " 182: d7/af/d7\n";
+	print " 183: d7/af/ff\n";
+	print " 184: d7/d7/00\n";
+	print " 185: d7/d7/5f\n";
+	print " 186: d7/d7/87\n";
+	print " 187: d7/d7/af\n";
+	print " 188: d7/d7/d7\n";
+	print " 189: d7/d7/ff\n";
+	print " 190: d7/ff/00\n";
+	print " 191: d7/ff/5f\n";
+	print " 192: d7/ff/87\n";
+	print " 193: d7/ff/af\n";
+	print " 194: d7/ff/d7\n";
+	print " 195: d7/ff/ff\n";
+	print " 196: ff/00/00\n";
+	print " 197: ff/00/5f\n";
+	print " 198: ff/00/87\n";
+	print " 199: ff/00/af\n";
+	print " 200: ff/00/d7\n";
+	print " 201: ff/00/ff\n";
+	print " 202: ff/5f/00\n";
+	print " 203: ff/5f/5f\n";
+	print " 204: ff/5f/87\n";
+	print " 205: ff/5f/af\n";
+	print " 206: ff/5f/d7\n";
+	print " 207: ff/5f/ff\n";
+	print " 208: ff/87/00\n";
+	print " 209: ff/87/5f\n";
+	print " 210: ff/87/87\n";
+	print " 211: ff/87/af\n";
+	print " 212: ff/87/d7\n";
+	print " 213: ff/87/ff\n";
+	print " 214: ff/af/00\n";
+	print " 215: ff/af/5f\n";
+	print " 216: ff/af/87\n";
+	print " 217: ff/af/af\n";
+	print " 218: ff/af/d7\n";
+	print " 219: ff/af/ff\n";
+	print " 220: ff/d7/00\n";
+	print " 221: ff/d7/5f\n";
+	print " 222: ff/d7/87\n";
+	print " 223: ff/d7/af\n";
+	print " 224: ff/d7/d7\n";
+	print " 225: ff/d7/ff\n";
+	print " 226: ff/ff/00\n";
+	print " 227: ff/ff/5f\n";
+	print " 228: ff/ff/87\n";
+	print " 229: ff/ff/af\n";
+	print " 230: ff/ff/d7\n";
+	print " 231: ff/ff/ff\n";
+	print " 232: 08/08/08\n";
+	print " 233: 12/12/12\n";
+	print " 234: 1c/1c/1c\n";
+	print " 235: 26/26/26\n";
+	print " 236: 30/30/30\n";
+	print " 237: 3a/3a/3a\n";
+	print " 238: 44/44/44\n";
+	print " 239: 4e/4e/4e\n";
+	print " 240: 58/58/58\n";
+	print " 241: 62/62/62\n";
+	print " 242: 6c/6c/6c\n";
+	print " 243: 76/76/76\n";
+	print " 244: 80/80/80\n";
+	print " 245: 8a/8a/8a\n";
+	print " 246: 94/94/94\n";
+	print " 247: 9e/9e/9e\n";
+	print " 248: a8/a8/a8\n";
+	print " 249: b2/b2/b2\n";
+	print " 250: bc/bc/bc\n";
+	print " 251: c6/c6/c6\n";
+	print " 252: d0/d0/d0\n";
+	print " 253: da/da/da\n";
+	print " 254: e4/e4/e4\n";
+	print " 255: ee/ee/ee\n";
+ print "\n";
+ print "\n";
+}
+print "\e[0m";
+exit;
diff --git a/cli/colortest.py b/cli/colortest.py
new file mode 100755
index 0000000..2d29590
--- /dev/null
+++ b/cli/colortest.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+#
+# http://askubuntu.com/questions/27314/script-to-display-all-terminal-colors
+
+import sys
+
+terse = "-t" in sys.argv[1:] or "--terse" in sys.argv[1:]
+
+for i in range(2 if terse else 10):
+ for j in range(30, 38):
+ for k in range(40, 48):
+ if terse:
+ print "\33[%d;%d;%dm%d;%d;%d\33[m " % (i, j, k, i, j, k),
+ else:
+ print ("%d;%d;%d: \33[%d;%d;%dm Hello, World! \33[m " %
+ (i, j, k, i, j, k, ))
+ print
+
diff --git a/cli/colortest.rb b/cli/colortest.rb
new file mode 100755
index 0000000..cc5d6d6
--- /dev/null
+++ b/cli/colortest.rb
@@ -0,0 +1,26 @@
+#!/usr/bin/env ruby
+# coding: utf-8
+
+# ANSI color scheme script
+# Author: Ivaylo Kuzev < Ivo >
+# Original: http://crunchbang.org/forums/viewtopic.php?pid=134749%23p134749#p134749
+# Modified using Ruby.
+
+CL = "\e[0m"
+BO = "\e[1m"
+
+R = "\e[31m"
+G = "\e[32m"
+Y = "\e[33m"
+B = "\e[34m"
+P = "\e[35m"
+C = "\e[36m"
+
+print <<EOF
+
+#{BO}#{R} ██████ #{CL} #{BO}#{G}██████ #{CL}#{BO}#{Y} ██████#{CL} #{BO}#{B}██████ #{CL} #{BO}#{P} ██████#{CL} #{BO}#{C} ███████#{CL}
+#{BO}#{R} ████████#{CL} #{BO}#{G}██ ██ #{CL}#{BO}#{Y}██ #{CL} #{BO}#{B}██ ██#{CL} #{BO}#{P}██████ #{CL} #{BO}#{C} █████████#{CL}
+#{R} ██ ████#{CL} #{G}██ ████#{CL}#{Y} ████ #{CL} #{B}████ ██#{CL} #{P}████ #{CL} #{C}█████ #{CL}
+#{R} ██ ██#{CL} #{G}██████ #{CL}#{Y} ████████#{CL} #{B}██████ #{CL} #{P}████████#{CL} #{C}██ #{CL}
+
+EOF
diff --git a/cli/colortest.sh b/cli/colortest.sh
new file mode 100755
index 0000000..3974d69
--- /dev/null
+++ b/cli/colortest.sh
@@ -0,0 +1,53 @@
+#!/usr/bin/env bash
+# Original Posted at http://crunchbang.org/forums/viewtopic.php?pid=126921%23p126921#p126921
+# [ESC] character in original post removed here.
+
+# ANSI Color -- use these variables to easily have different color
+# and format output. Make sure to output the reset sequence after
+# colors (f = foreground, b = background), and use the 'off'
+# feature for anything you turn on.
+
+initializeANSI()
+{
+ esc="$(echo -en '\e')"
+
+ blackf="${esc}[30m"; redf="${esc}[31m"; greenf="${esc}[32m"
+ yellowf="${esc}[33m"; bluef="${esc}[34m"; purplef="${esc}[35m"
+ cyanf="${esc}[36m"; whitef="${esc}[37m"
+
+ blackb="${esc}[40m"; redb="${esc}[41m"; greenb="${esc}[42m"
+ yellowb="${esc}[43m"; blueb="${esc}[44m"; purpleb="${esc}[45m"
+ cyanb="${esc}[46m"; whiteb="${esc}[47m"
+
+ boldon="${esc}[1m"; boldoff="${esc}[22m"
+ italicson="${esc}[3m"; italicsoff="${esc}[23m"
+ ulon="${esc}[4m"; uloff="${esc}[24m"
+ invon="${esc}[7m"; invoff="${esc}[27m"
+
+ reset="${esc}[0m"
+}
+
+# note in this first use that switching colors doesn't require a reset
+# first - the new color overrides the old one.
+
+#clear
+
+initializeANSI
+
+cat << EOF
+
+ ${yellowf} ▄███████▄${reset} ${redf} ▄██████▄${reset} ${greenf} ▄██████▄${reset} ${bluef} ▄██████▄${reset} ${purplef} ▄██████▄${reset} ${cyanf} ▄██████▄${reset}
+ ${yellowf}▄█████████▀▀${reset} ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄${reset} ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄${reset} ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄${reset} ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄${reset} ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}
+ ${yellowf}███████▀${reset} ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███${reset} ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███${reset} ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███${reset} ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███${reset} ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}
+ ${yellowf}███████▄${reset} ${redf}████████████${reset} ${greenf}████████████${reset} ${bluef}████████████${reset} ${purplef}████████████${reset} ${cyanf}████████████${reset}
+ ${yellowf}▀█████████▄▄${reset} ${redf}██▀██▀▀██▀██${reset} ${greenf}██▀██▀▀██▀██${reset} ${bluef}██▀██▀▀██▀██${reset} ${purplef}██▀██▀▀██▀██${reset} ${cyanf}██▀██▀▀██▀██${reset}
+ ${yellowf} ▀███████▀${reset} ${redf}▀ ▀ ▀ ▀${reset} ${greenf}▀ ▀ ▀ ▀${reset} ${bluef}▀ ▀ ▀ ▀${reset} ${purplef}▀ ▀ ▀ ▀${reset} ${cyanf}▀ ▀ ▀ ▀${reset}
+
+ ${boldon}${yellowf} ▄███████▄ ${redf} ▄██████▄ ${greenf} ▄██████▄ ${bluef} ▄██████▄ ${purplef} ▄██████▄ ${cyanf} ▄██████▄${reset}
+ ${boldon}${yellowf}▄█████████▀▀ ${redf}▄${whitef}█▀█${redf}██${whitef}█▀█${redf}██▄ ${greenf}▄${whitef}█▀█${greenf}██${whitef}█▀█${greenf}██▄ ${bluef}▄${whitef}█▀█${bluef}██${whitef}█▀█${bluef}██▄ ${purplef}▄${whitef}█▀█${purplef}██${whitef}█▀█${purplef}██▄ ${cyanf}▄${whitef}█▀█${cyanf}██${whitef}█▀█${cyanf}██▄${reset}
+ ${boldon}${yellowf}███████▀ ${redf}█${whitef}▄▄█${redf}██${whitef}▄▄█${redf}███ ${greenf}█${whitef}▄▄█${greenf}██${whitef}▄▄█${greenf}███ ${bluef}█${whitef}▄▄█${bluef}██${whitef}▄▄█${bluef}███ ${purplef}█${whitef}▄▄█${purplef}██${whitef}▄▄█${purplef}███ ${cyanf}█${whitef}▄▄█${cyanf}██${whitef}▄▄█${cyanf}███${reset}
+ ${boldon}${yellowf}███████▄ ${redf}████████████ ${greenf}████████████ ${bluef}████████████ ${purplef}████████████ ${cyanf}████████████${reset}
+ ${boldon}${yellowf}▀█████████▄▄ ${redf}██▀██▀▀██▀██ ${greenf}██▀██▀▀██▀██ ${bluef}██▀██▀▀██▀██ ${purplef}██▀██▀▀██▀██ ${cyanf}██▀██▀▀██▀██${reset}
+ ${boldon}${yellowf} ▀███████▀ ${redf}▀ ▀ ▀ ▀ ${greenf}▀ ▀ ▀ ▀ ${bluef}▀ ▀ ▀ ▀ ${purplef}▀ ▀ ▀ ▀ ${cyanf}▀ ▀ ▀ ▀${reset}
+
+EOF
diff --git a/cli/csv2json.py b/cli/csv2json.py
new file mode 100755
index 0000000..54f6be2
--- /dev/null
+++ b/cli/csv2json.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# This is a simple tool that converts a CSV file into a JSON file.
+# The first line of the input CSV file is used as the field names.
+#
+# Use 'OrderedDict' to keep the input fields order.
+#
+# Aaron LI
+# 2015/06/11
+#
+
+from __future__ import print_function, division
+
+__version__ = "0.1.0"
+__date__ = "2015/06/11"
+
+import sys
+import argparse
+import csv
+import json
+
+from collections import OrderedDict
+
+
+def csv2json(csvfile, jsonfile=None):
+ """
+ Convert CSV data to JSON data.
+ The first line of CSV data is used as the field names.
+
+ Return:
+ If jsonfile is None, then return a list of JSON dict.
+ """
+ if not hasattr(csvfile, "read"):
+ csvfile = open(csvfile, "r")
+ if (jsonfile is not None) and (not hasattr(jsonfile, "write")):
+ jsonfile = open(jsonfile, "w")
+ csvdata = list(csv.reader(csvfile))
+ fieldnames = csvdata[0]
+ # use 'OrderedDict' to keep fields order
+ jsondata = [ OrderedDict(zip(fieldnames, row)) for row in csvdata[1:] ]
+ csvfile.close()
+ if jsonfile is None:
+ return jsondata
+ else:
+ # 'ensure_ascii=False' to support UTF-8
+ json.dump(jsondata, jsonfile, ensure_ascii=False, indent=4)
+ jsonfile.close()
+
+
+def main():
+ # command line options & arguments
+ parser = argparse.ArgumentParser(
+ description="Simple CSV to JSON converter")
+ parser.add_argument("-V", "--version", action="version",
+ version="%(prog)s " + "%s (%s)" % (__version__, __date__))
+ parser.add_argument("csvfile", help="Input CSV file")
+ parser.add_argument("jsonfile", nargs="?", default=sys.stdout,
+ help="Output JSON file (default stdout)")
+ args = parser.parse_args()
+
+ csv2json(args.csvfile, args.jsonfile)
+
+
+if __name__ == "__main__":
+ main()
+
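+# Usage sketch (hypothetical file names):
+#   ./csv2json.py data.csv data.json
+#   ./csv2json.py data.csv           # JSON written to stdout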
diff --git a/cli/jpegs2pdf.sh b/cli/jpegs2pdf.sh
new file mode 100755
index 0000000..6d42bab
--- /dev/null
+++ b/cli/jpegs2pdf.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+#############################################################################
+#
+# Shellscript to convert a set of JPEG files to a multipage PDF.
+#
+# Requirements: (1) Ghostscript needs to be installed on the local system.
+# (2) ImageMagick needs to be installed on the local system.
+#
+# Usage: jpegs2pdf.sh output.pdf file1.jpeg [file2.jpeg [file2.jpeg [...]]]
+#
+# Copyright (c) 2007, <pipitas@gmail.com>
+# Use, distribute and modify without any restrictions.
+#
+# Versions:
+# v1.0.0, Jul 12 2007: initial version
+# v1.0.1, Jan 07 2011: set viewJPEG.ps path (self-compiled GS 9.02)
+#
+#############################################################################
+
+if [ $# -eq 0 ]; then
+ echo "Usage:"
+ echo " `basename $0` output.pdf 1.jpg ..."
+ exit 1
+fi
+
+outfile=$1
+shift
+
+param=""
+for i in "$@" ; do
+ dimension=$(identify -format "%[fx:(w)] %[fx:(h)]" "${i}")
+ param="${param} <</PageSize [${dimension}]>> setpagedevice (${i}) viewJPEG showpage"
+done
+
+gs \
+ -sDEVICE=pdfwrite \
+ -dPDFSETTINGS=/prepress \
+ -o "$outfile" \
+ viewjpeg.ps \
+ -c "${param}"
+
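+# Example (hypothetical scans):
+#   ./jpegs2pdf.sh album.pdf scan1.jpg scan2.jpg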
diff --git a/cli/pdfmerge.sh b/cli/pdfmerge.sh
new file mode 100755
index 0000000..aef72db
--- /dev/null
+++ b/cli/pdfmerge.sh
@@ -0,0 +1,23 @@
+#!/bin/sh
+#
+# Merge multiple PDFs with pdftk.
+#
+# Ref:
+# Merging Multiple PDFs under GNU/Linux
+# https://blog.dbrgn.ch/2013/8/14/merge-multiple-pdfs/
+#
+# Weitian LI
+# 2015/01/23
+#
+
+if [ $# -lt 2 ]; then
+ printf "Usage: `basename $0` out.pdf in1.pdf ...\n"
+ exit 1
+fi
+
+outpdf="$1"
+shift
+
+echo "Input files: $@"
+pdftk "$@" cat output "${outpdf}"
+
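+# Example (hypothetical chapters):
+#   ./pdfmerge.sh merged.pdf ch1.pdf ch2.pdf ch3.pdf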
diff --git a/cli/shrinkpdf.sh b/cli/shrinkpdf.sh
new file mode 100755
index 0000000..1190fce
--- /dev/null
+++ b/cli/shrinkpdf.sh
@@ -0,0 +1,56 @@
+#!/bin/sh
+#
+# Shrink the size of PDF files by adjusting their quality using gs.
+#
+# Aaron LI
+# 2013/09/18
+#
+
+case "$1" in
+ -[hH]*|--[hH]*)
+ printf "usage:\n"
+ printf " `basename $0` in=<input.pdf> out=<output.pdf> quality=<screen|ebook|printer|prepress> imgdpi=<img_dpi>\n"
+ exit 1
+ ;;
+esac
+
+getopt_keyval() {
+ until [ -z "$1" ]
+ do
+ key=${1%%=*} # extract key
+ val=${1#*=} # extract value
+ keyval="${key}=\"${val}\""
+ echo "## getopt: eval '${keyval}'"
+ eval ${keyval}
+ shift # shift, process next one
+ done
+}
+getopt_keyval "$@"
+
+if [ -z "${in}" ] || [ -z "${out}" ]; then
+ printf "Error: 'in' or 'out' not specified\n"
+ exit 2
+fi
+quality=${quality:-ebook}
+imgdpi=${imgdpi:-120}
+
+printf "# in: ${in}
+# out: ${out}
+# quality: ${quality}
+# imgdpi: ${imgdpi}\n"
+
+gs -dNOPAUSE -dBATCH -dSAFER \
+ -sDEVICE=pdfwrite \
+ -dCompatibilityLevel=1.4 \
+ -dPDFSETTINGS="/${quality}" \
+ -dEmbedAllFonts=true \
+ -dSubsetFonts=true \
+ -dColorImageDownsampleType=/Bicubic \
+ -dColorImageResolution=${imgdpi} \
+ -dGrayImageDownsampleType=/Bicubic \
+ -dGrayImageResolution=${imgdpi} \
+ -dMonoImageDownsampleType=/Bicubic \
+ -dMonoImageResolution=${imgdpi} \
+ -sOutputFile=${out} \
+ ${in}
+
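+# Example (hypothetical files):
+#   ./shrinkpdf.sh in=paper.pdf out=paper-small.pdf quality=screen imgdpi=96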
diff --git a/cli/term_color.sh b/cli/term_color.sh
new file mode 100755
index 0000000..f10f916
--- /dev/null
+++ b/cli/term_color.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+#
+# This file echoes a bunch of color codes to the
+# terminal to demonstrate what's available. Each
+# line is the color code of one foreground color,
+# out of 17 (default + 16 escapes), followed by a
+# test use of that color on all nine background
+# colors (default + 8 escapes).
+#
+# Ref: https://wiki.archlinux.org/index.php/X_resources
+#
+
+T='gYw' # The test text
+
+echo -e "\n 40m 41m 42m 43m\
+ 44m 45m 46m 47m";
+
+for FGs in ' m' ' 1m' ' 30m' '1;30m' ' 31m' '1;31m' ' 32m' \
+ '1;32m' ' 33m' '1;33m' ' 34m' '1;34m' ' 35m' '1;35m' \
+ ' 36m' '1;36m' ' 37m' '1;37m';
+ do FG=${FGs// /}
+ echo -en " $FGs \033[$FG $T "
+ for BG in 40m 41m 42m 43m 44m 45m 46m 47m;
+ do echo -en " \033[$FG\033[$BG $T \033[0m";
+ done
+ echo;
+done
+echo
diff --git a/cli/term_color_2.sh b/cli/term_color_2.sh
new file mode 100755
index 0000000..4dc2ef2
--- /dev/null
+++ b/cli/term_color_2.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# Original: http://frexx.de/xterm-256-notes/
+# http://frexx.de/xterm-256-notes/data/colortable16.sh
+# Modified by Aaron Griffin
+# and further by Kazuo Teramoto
+FGNAMES=(' black ' ' red ' ' green ' ' yellow' ' blue ' 'magenta' ' cyan ' ' white ')
+BGNAMES=('DFT' 'BLK' 'RED' 'GRN' 'YEL' 'BLU' 'MAG' 'CYN' 'WHT')
+
+echo " ┌──────────────────────────────────────────────────────────────────────────┐"
+for b in {0..8}; do
+ ((b>0)) && bg=$((b+39))
+
+ echo -en "\033[0m ${BGNAMES[b]} │ "
+
+ for f in {0..7}; do
+ echo -en "\033[${bg}m\033[$((f+30))m ${FGNAMES[f]} "
+ done
+
+ echo -en "\033[0m │"
+ echo -en "\033[0m\n\033[0m │ "
+
+ for f in {0..7}; do
+ echo -en "\033[${bg}m\033[1;$((f+30))m ${FGNAMES[f]} "
+ done
+
+ echo -en "\033[0m │"
+ echo -e "\033[0m"
+
+ ((b<8)) &&
+ echo " ├──────────────────────────────────────────────────────────────────────────┤"
+done
+echo " └──────────────────────────────────────────────────────────────────────────┘"
diff --git a/cli/term_color_3.sh b/cli/term_color_3.sh
new file mode 100755
index 0000000..85b499a
--- /dev/null
+++ b/cli/term_color_3.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# Original: http://frexx.de/xterm-256-notes/
+# http://frexx.de/xterm-256-notes/data/colortable16.sh
+# Modified by Aaron Griffin
+# and further by Kazuo Teramoto
+
+
+FGNAMES=(' black ' ' red ' ' green ' ' yellow' ' blue ' 'magenta' ' cyan ' ' white ')
+BGNAMES=('DFT' 'BLK' 'RED' 'GRN' 'YEL' 'BLU' 'MAG' 'CYN' 'WHT')
+echo " ----------------------------------------------------------------------------"
+for b in $(seq 0 8); do
+ if [ "$b" -gt 0 ]; then
+ bg=$(($b+39))
+ fi
+
+ echo -en "\033[0m ${BGNAMES[$b]} : "
+ for f in $(seq 0 7); do
+ echo -en "\033[${bg}m\033[$(($f+30))m ${FGNAMES[$f]} "
+ done
+ echo -en "\033[0m :"
+
+ echo -en "\033[0m\n\033[0m : "
+ for f in $(seq 0 7); do
+ echo -en "\033[${bg}m\033[1;$(($f+30))m ${FGNAMES[$f]} "
+ done
+ echo -en "\033[0m :"
+ echo -e "\033[0m"
+
+ if [ "$b" -lt 8 ]; then
+ echo " ----------------------------------------------------------------------------"
+ fi
+done
+echo " ----------------------------------------------------------------------------"
diff --git a/cli/unzip-gbk.py b/cli/unzip-gbk.py
new file mode 100755
index 0000000..423e10f
--- /dev/null
+++ b/cli/unzip-gbk.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# unzip-gbk.py
+#
+# http://note.ninehills.info/linux-gbk.html
+#
+
+import os
+import sys
+import zipfile
+
+print "Processing File " + sys.argv[1]
+
+file = zipfile.ZipFile(sys.argv[1], "r")
+for name in file.namelist():
+ utf8name = name.decode('gbk')
+ print "Extracting " + utf8name
+ pathname = os.path.dirname(utf8name)
+ if not os.path.exists(pathname) and pathname != "":
+ os.makedirs(pathname)
+ data = file.read(name)
+ if not os.path.exists(utf8name):
+ fo = open(utf8name, "wb") # binary mode for the raw zip data
+ fo.write(data)
+ fo.close()
+file.close()
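+
+# Usage sketch (Python 2; "archive.zip" is a hypothetical file):
+#   python2 unzip-gbk.py archive.zip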
diff --git a/cli/vimpager b/cli/vimpager
new file mode 100755
index 0000000..447fd9a
--- /dev/null
+++ b/cli/vimpager
@@ -0,0 +1,85 @@
+#!/bin/sh
+
+# Script for using ViM as a PAGER.
+# Based on Bram's less.sh.
+# Version 1.3
+
+file="$@"
+if [ -z "$file" ]; then file="-"; fi
+
+if uname -s | grep -iq cygwin; then
+ cygwin=1
+elif uname -s | grep -iq linux; then
+ linux=1
+elif uname -s | grep -iq sunos; then
+ solaris=1
+else
+ bsd=1
+fi
+
+less_vim() {
+ vim -R \
+ -c 'let no_plugin_maps = 1' \
+ -c 'set scrolloff=999' \
+ -c 'runtime! macros/less.vim' \
+ -c 'set foldlevel=999' \
+ -c 'set mouse=h' \
+ -c 'set nonu' \
+ -c 'nmap <ESC>u :nohlsearch<cr>' \
+ "$@"
+}
+
+do_ps() {
+ if [ $solaris ]; then
+ ps -u `id -u` -o pid,comm=
+ elif [ $bsd ]; then
+ ps -U `id -u` -o pid,comm=
+ else
+ ps fuxw
+ fi
+}
+
+pproc() {
+ if [ $linux ]; then
+ ps -p $1 -o comm=
+ elif [ $cygwin ]; then
+ ps -p $1 | sed -e 's/^I/ /' | grep -v PID
+ else
+ ps -p $1 -o comm= | grep -v PID
+ fi
+}
+
+ppid() {
+ if [ $linux ]; then
+ ps -p $1 -o ppid=
+ elif [ $cygwin ]; then
+ ps -p $1 | sed -e 's/^I/ /' | grep -v PID | awk '{print $2}'
+ else
+ ps -p $1 -o ppid= | grep -v PID
+ fi
+}
+
+# Check if called from man, perldoc or pydoc
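+# NOTE: the sed patterns below appear to contain literal control characters
+# (an ESC before '[[^m]*m', a backspace in 's/.//g') that strip ANSI colors
+# and man-page overstrikes; they may not survive copy-and-paste.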
+if do_ps | grep -q '\(py\(thon\|doc\)\|man\|perl\(doc\)\?\([0-9.]*\)\?\)\>'; then
+ proc=$$
+ while next_parent=`ppid $proc` && [ $next_parent != 1 ]; do
+ if pproc $next_parent | grep -q 'man\>'; then
+ cat $file | sed -e 's/\[[^m]*m//g' | sed -e 's/.//g' | less_vim -c 'set ft=man' -; exit
+ elif pproc $next_parent | grep -q 'py\(thon\|doc\)\>'; then
+ cat $file | sed -e 's/\[[^m]*m//g' | sed -e 's/.//g' | less_vim -c 'set ft=man' -; exit
+ elif pproc $next_parent | grep -q 'perl\(doc\)\?\([0-9.]*\)\?\>'; then
+ cat $file | sed -e 's/.//g' | less_vim -c 'set ft=man' -; exit
+ fi
+ proc=$next_parent
+ done
+fi
+
+less_vim "$file"
+
+# CONTRIBUTORS:
+#
+# Rafael Kitover
+# Antonio Ospite
+# Jean-Marie Gaillourdet
+# Perry Hargrave
+# Koen Smits
diff --git a/cluster/kMeans.py b/cluster/kMeans.py
new file mode 100644
index 0000000..f4868c6
--- /dev/null
+++ b/cluster/kMeans.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Credit: Machine Learning in Action: Chapter 10
+#
+# Aaron LI
+# 2015/06/23
+#
+
+"""
+k-means clustering algorithm
+"""
+
+
+import numpy as np
+
+
+def loadDataSet(fileName):
+ dataMat = []
+ fr = open(fileName)
+ for line in fr.readlines():
+ curLine = line.strip().split('\t')
+ fltLine = list(map(float, curLine))
+ dataMat.append(fltLine)
+ return np.array(dataMat)
+
+
+def distEclud(vecA, vecB):
+ return np.sqrt(np.sum(np.power(vecA - vecB, 2)))
+
+
+def randCent(dataSet, k):
+ n = np.shape(dataSet)[1]
+ centroids = np.zeros((k, n))
+ for j in range(n):
+ minJ = np.min(dataSet[:, j])
+ rangeJ = float(np.max(dataSet[:, j]) - minJ)
+ centroids[:, j] = minJ + rangeJ * np.random.rand(k)
+ return centroids
+
+
+def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
+ m = np.shape(dataSet)[0]
+ clusterAssment = np.zeros((m, 2))
+ centroids = createCent(dataSet, k)
+ clusterChanged = True
+ iterations = 0
+ while clusterChanged:
+ clusterChanged = False
+ iterations += 1
+ for i in range(m):
+ minDist = np.inf
+ minIndex = -1
+ # to find the nearest centroid
+ for j in range(k):
+ distJI = distMeas(centroids[j, :], dataSet[i, :])
+ if distJI < minDist:
+ minDist = distJI
+ minIndex = j
+ if clusterAssment[i, 0] != minIndex:
+ clusterChanged = True
+ clusterAssment[i, :] = minIndex, minDist**2
+ #print(centroids)
+ for cent in range(k):
+ ptsInClust = dataSet[np.nonzero(clusterAssment[:, 0] == cent)]
+ centroids[cent, :] = np.mean(ptsInClust, axis=0)
+ result = {
+ 'k': k,
+ 'centroids': centroids,
+ 'labels': clusterAssment[:, 0].astype(int),
+ 'distance2': clusterAssment[:, 1],
+ 'assessment': clusterAssment,
+ 'iterations': iterations
+ }
+ return result
+
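+# Minimal usage sketch ("testSet.txt" is a hypothetical tab-separated file):
+#   dataSet = loadDataSet("testSet.txt")
+#   result = kMeans(dataSet, 4)
+#   print(result["centroids"], result["iterations"])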
diff --git a/img/force_field_transform.jl b/img/force_field_transform.jl
new file mode 100644
index 0000000..1b3872a
--- /dev/null
+++ b/img/force_field_transform.jl
@@ -0,0 +1,135 @@
+#!/usr/bin/env julia
+# -*- coding: utf-8 -*-
+#
+# Force field transform
+#
+# Aaron LI
+# 2015/07/14
+#
+
+using FITSIO;
+#include("../julia/ndgrid.jl");
+
+@everywhere function meshgrid(vx, vy)
+ m, n = length(vy), length(vx)
+ vx = reshape(vx, 1, n)
+ vy = reshape(vy, m, 1)
+ (repmat(vx, m, 1), repmat(vy, 1, n))
+end
+
+
+# Calculate the forces between the specified point with respect to the image.
+@everywhere function force(p0, img)
+ img = copy(img);
+ x0, y0 = p0;
+ v0 = img[y0, x0];
+ img[y0, x0] = 0.0;
+ rows, cols = size(img);
+ x, y = meshgrid(1:cols, 1:rows);
+ x[y0, x0] = -1;
+ y[y0, x0] = -1;
+ f_x = v0 .* img .* (x-x0) ./ ((x-x0).^2 + (y-y0).^2).^1.5;
+ f_y = v0 .* img .* (y-y0) ./ ((x-x0).^2 + (y-y0).^2).^1.5;
+ #return (f_x, f_y);
+ return (sum(f_x), sum(f_y));
+end
+
+
+# Perform the "force field transform" for the input image.
+#
+# Return:
+# (amplitudes, angles)
+# amplitudes: the amplitudes of the resulting forces of each pixel
+# angles: the directions of the resulting forces of each pixel,
+# in unit radian.
+@everywhere function force_field_transform_serial(img, rowstart=1, rowend="end")
+ rows, cols = size(img)
+ if rowend == "end"
+ rowend = rows
+ end
+ amplitudes = zeros(rows, cols)
+ angles = zeros(rows, cols)
+ t0 = time()
+ t_p = t0 + 30 # in 30 seconds
+ for y = rowstart:rowend
+ for x = 1:cols
+ t1 = time()
+ if (t1 >= t_p)
+ percent = 100*((y-rowstart)*cols + x+1) / ((rowend-rowstart+1)*cols)
+ @printf("Worker #%d: progress: %.3f%%; %.1f min\n",
+ myid(), percent, (t1-t0)/60.0)
+ t_p += 30 # in 30 seconds
+ end
+ F_x, F_y = force((x, y), img)
+ #@printf("F_x, F_y = (%f, %f)\n", F_x, F_y);
+ amplitudes[y, x] = sqrt(F_x^2 + F_y^2)
+ angles[y, x] = atan2(F_y, F_x)
+ end
+ end
+ t1 = time()
+ @printf("Worker #%d: finished in %.1f min!\n", myid(), (t1-t0)/60.0)
+ return (amplitudes, angles)
+end
+
+
+# parallel-capable
+function force_field_transform(img)
+ t0 = time()
+ rows, cols = size(img)
+ np = nprocs()
+ amplitudes = cell(np)
+ angles = cell(np)
+ # split rows for each process
+ rows_chunk = div(rows, np)
+ rowstart = cell(np)
+ rowend = cell(np)
+ @sync begin
+ for p = 1:np
+ rowstart[p] = 1 + rows_chunk * (p-1)
+ if p == np
+ rowend[p] = rows
+ else
+ rowend[p] = rowstart[p] + rows_chunk - 1
+ end
+ # perform transform
+ @async begin
+ amplitudes[p], angles[p] = remotecall_fetch(p,
+ force_field_transform_serial,
+ img, rowstart[p], rowend[p])
+ end
+ end
+ end
+ t1 = time()
+ @printf("Finished in %.1f min!\n", (t1-t0)/60.0)
+ return (sum(amplitudes), sum(angles))
+end
+
+
+# arguments
+#println(ARGS);
+if length(ARGS) != 3
+ println("Usage: PROG <input_fits_img> <out_fits_amplitudes> <out_fits_angles>");
+ exit(1);
+end
+
+infile = ARGS[1];
+outfile_ampl = ARGS[2];
+outfile_angles = ARGS[3];
+
+fits_img = FITS(infile);
+img = read(fits_img[1]);
+header = read_header(fits_img[1]);
+
+# perform force field transform
+ampl, angles = force_field_transform(img);
+
+outfits_ampl = FITS(outfile_ampl, "w");
+outfits_angles = FITS(outfile_angles, "w");
+write(outfits_ampl, ampl; header=header);
+write(outfits_angles, angles; header=header);
+
+close(fits_img);
+close(outfits_ampl);
+close(outfits_angles);
+
+#= vim: set ts=8 sw=4 tw=0 fenc=utf-8 ft=julia: =#
diff --git a/img/force_field_transform.py b/img/force_field_transform.py
new file mode 100644
index 0000000..2b185c8
--- /dev/null
+++ b/img/force_field_transform.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+#
+# Force field transform (Hurley et al., 2002, 2005)
+#
+
+"""
+Force field transform
+"""
+
+from __future__ import print_function, division
+
+import sys
+import time
+import numpy as np
+
+
+def force(p1, p2):
+ """
+ The force between two points of the image.
+
+ Arguments:
+ p1, p2: (value, x, y)
+
+ Return:
+ # (force, angle): value and direction of the force.
+ # angle: (-pi, pi], with respect to p1.
+ (f_x, f_y): x and y components of the force
+ """
+ v1, x1, y1 = p1
+ v2, x2, y2 = p2
+ #force = v1*v2 / ((x1-x2)**2 + (y1-y2)**2)
+ #angle = np.atan2(y2-y1, x2-x1)
+ #return (force, angle)
+ f_x = v1 * v2 * (x2-x1) / ((x2-x1)**2 + (y2-y1)**2)**1.5
+ f_y = v1 * v2 * (y2-y1) / ((x2-x1)**2 + (y2-y1)**2)**1.5
+ return (f_x, f_y)
+
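+# For example, two unit-value pixels one step apart along x attract with
+# unit force: force((1, 0, 0), (1, 1, 0)) == (1.0, 0.0)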
+
+def force_array(p0, img):
+ """
+ The forces between the input point with respect to the image.
+
+ Arguments:
+ p0: (x, y), note (x, y) start with zero.
+ img: input image, a numpy array
+
+ Return:
+ (f_x, f_y): x and y components of the forces of the same size
+ of the input image
+ """
+ img = img.copy() # work on a copy; do not clobber the caller's image
+ x0, y0 = p0
+ v0 = img[y0, x0]
+ img[y0, x0] = 0.0
+ x, y = np.meshgrid(range(img.shape[1]), range(img.shape[0]))
+ x[y0, x0] = -1
+ y[y0, x0] = -1
+ f_x = v0 * img * (x-x0) / ((x-x0)**2 + (y-y0)**2)**1.5
+ f_y = v0 * img * (y-y0) / ((x-x0)**2 + (y-y0)**2)**1.5
+ return (f_x, f_y)
+
+
+def vector_add(v1, v2):
+ """
+ Add two vectors and return the results.
+
+ Arguments:
+ v1, v2: two input vectors of format (f_x, f_y)
+
+ Return:
+ (F_x, F_y)
+ """
+ f1_x, f1_y = v1
+ f2_x, f2_y = v2
+ return (f1_x+f2_x, f1_y+f2_y)
+
+
+def force_summation(pixel, img):
+ """
+ Calculate the resulting force of the specified pixel with respect to
+ the image.
+
+ Argument:
+ pixel: the position (x, y) of the pixel to be calculated
+ img: the input image
+
+ Return:
+ (F_x, F_y): x and y components of the resulting force.
+ """
+ img = np.array(img)
+ x0, y0 = pixel
+ f_x, f_y = force_array((x0, y0), img)
+ return (f_x.sum(), f_y.sum())
+
+
+def force_field_transform(img):
+ """
+ Perform the "force field transform" on the input image.
+
+ Arguments:
+ img: input 2D image
+
+ Return:
+ (amplitudes, angles)
+ amplitudes: the amplitudes of the resulting forces of each pixel
+ angles: the directions of the resulting forces of each pixel,
+ in unit radian.
+ """
+ img = np.array(img)
+ amplitudes = np.zeros(img.shape)
+ angles = np.zeros(img.shape)
+ rows, cols = img.shape
+ t0 = time.time()
+ t_p = t0 + 30 # in 30 seconds
+ for y in range(rows):
+ for x in range(cols):
+ t1 = time.time()
+ if t1 >= t_p:
+ percent = 100 * (y*cols + x + 1) / (rows * cols)
+ print("progress: %.3f%%; %.1f min" % (percent, (t1-t0)/60.0),
+ file=sys.stderr)
+ t_p += 30 # in 30 seconds
+ f_x, f_y = force_array((x, y), img)
+ F_x, F_y = f_x.sum(), f_y.sum()
+ amplitudes[y, x] = np.sqrt(F_x**2 + F_y**2)
+ angles[y, x] = np.math.atan2(F_y, F_x)
+ return (amplitudes, angles)
+
+
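+# Minimal usage sketch (img is any 2D numpy array, e.g. a FITS image):
+#   ampl, angles = force_field_transform(img)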
diff --git a/img/force_field_transform_fft.jl b/img/force_field_transform_fft.jl
new file mode 100644
index 0000000..c5bf905
--- /dev/null
+++ b/img/force_field_transform_fft.jl
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+#
+# To do force field transform using FFT
+#
+# Aaron LI
+# 2015/07/16
+#
+
+function forcefieldtransform_fft(img)
+ rows, cols = size(img)
+ pic = zeros(3*rows, 3*cols)
+ pic[1:rows, 1:cols] = img
+ # unit force field
+ unit_ff = complex(zeros(3*rows, 3*cols))
+ for r = 1:(2*rows-1)
+ for c = 1:(2*cols-1)
+ d = (rows+cols*im) - (r+c*im)
+ if (r, c) == (rows, cols)
+ unit_ff[r, c] = 0 + 0im
+ else
+ unit_ff[r, c] = d / abs(d)^3
+ end
+ end
+ end
+ # FIXME matrix sizes
+ ff = sqrt(rows*cols) * ifft(fft(pic) .* fft(unit_ff))
+ #ff_crop = ff[rows:2*rows, cols:2*cols]
+end
+
diff --git a/img/forcefield.jl b/img/forcefield.jl
new file mode 100644
index 0000000..bf2c236
--- /dev/null
+++ b/img/forcefield.jl
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+#
+# Force field transform with specified size of mask.
+#
+# Aaron LI
+# 2015/07/19
+#
+
+# Make the specified sized force field mask.
+# NOTE: the number of rows and cols must be odd.
+function ff_mask(rows=5, cols=5)
+ rows % 2 == 1 || error("rows must be odd number")
+ cols % 2 == 1 || error("cols must be odd number")
+ mask = complex(zeros(rows, cols))
+ for r = range(-div(rows, 2), rows)
+ for c = range(-div(cols, 2), cols)
+ i, j = r + div(rows+1, 2), c + div(cols+1, 2)
+ #@printf("(r,c) = (%d,%d); (i,j) = (%d,%d)\n", r, c, i, j)
+ d = c + r*im
+ if abs(d) < 1e-8
+ mask[i, j] = 0.0
+ else
+ mask[i, j] = d / abs(d)^3
+ end
+ end
+ end
+ return mask / sum(abs(mask))
+end
+
+
+# Padding image by specified number of rows and cols.
+# Default padding mode: mirror
+function pad_image(img, pad_rows, pad_cols, mode="mirror")
+ rows, cols = size(img)
+ rows_new, cols_new = rows + 2*pad_rows, cols + 2*pad_cols
+ img_pad = zeros(rows_new, cols_new)
+ img_pad[(pad_rows+1):(pad_rows+rows), (pad_cols+1):(pad_cols+cols)] = img
+ for r = 1:rows_new
+ for c = 1:cols_new
+ if mode == "mirror"
+ if r <= pad_rows
+ r_mirror = 2*(pad_rows+1) - r
+ elseif r <= pad_rows+rows
+ r_mirror = r
+ else
+ r_mirror = 2*(pad_rows+rows) - r
+ end
+ if c <= pad_cols
+ c_mirror = 2*(pad_cols+1) - c
+ elseif c <= pad_cols+cols
+ c_mirror = c
+ else
+ c_mirror = 2*(pad_cols+cols) - c
+ end
+ if (r_mirror, c_mirror) != (r, c)
+ #@printf("(%d,%d) <= (%d,%d)\n", r, c, r_mirror, c_mirror)
+ img_pad[r, c] = img_pad[r_mirror, c_mirror]
+ end
+ else
+ error("mode not supported")
+ end
+ end
+ end
+ return img_pad
+end
+
+
+# Perform force field transform for the image.
+function ff_transform(img, mask, mode="mirror")
+ rows, cols = size(img)
+ mask_rows, mask_cols = size(mask)
+ pad_rows, pad_cols = div(mask_rows, 2), div(mask_cols, 2)
+ img_pad = pad_image(img, pad_rows, pad_cols)
+ # result images
+ ff_amplitudes = zeros(rows, cols)
+ ff_angles = zeros(rows, cols)
+ # calculate transformed values
+ for r = (pad_rows+1):(pad_rows+rows)
+ for c = (pad_cols+1):(pad_cols+cols)
+ force = sum(img_pad[r, c] * img_pad[(r-pad_rows):(r+pad_rows), (c-pad_cols):(c+pad_cols)] .* mask)
+ ff_amplitudes[r-pad_rows, c-pad_cols] = abs(force)
+ ff_angles[r-pad_rows, c-pad_cols] = angle(force)
+ end
+ end
+ return ff_amplitudes, ff_angles
+end
+
diff --git a/julia/anisodiff.jl b/julia/anisodiff.jl
new file mode 100644
index 0000000..e01f6db
--- /dev/null
+++ b/julia/anisodiff.jl
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+#
+# ANISODIFF - Anisotropic diffusion
+#
+# Usage:
+# diff = anisodiff(img, niter, kappa, lambda, option)
+#
+# Arguments:
+# | img - input image (2D grayscale)
+# | niter - number of iterations
+# | kappa - conduction coefficient (gradient modulus threshold);
+# | may also be given as a function that computes kappa from
+# | the image (the default is calc_k_percentile)
+# | This parameter controls conduction as a function of gradient.
+# | If kappa is low, small intensity gradients are able to block
+# | conduction and hence diffusion across step edges. A large value
+# | reduces the influence of intensity gradients on conduction.
+# | lambda - integration constant for stability (0 <= lambda <= 0.25)
+# | This parameter controls the diffusion speed, and you
+# | usually want it at the maximum value of 0.25.
+# | default value: 0.25
+# | option - conduction coefficient functions proposed by Perona & Malik:
+# | 1: c(x, y, t) = exp(-(nablaI/kappa).^2)
+# | privileges high-contrast edges over low-contrast ones
+# | 2: c(x, y, t) = 1 ./ (1 + (nablaI/kappa).^2)
+# | privileges wide regions over smaller ones
+# | default value: 1
+#
+# Returns:
+# | diff - anisotropic diffused image
+#
+# Reference:
+# [1] P. Perona and J. Malik.
+# Scale-space and edge detection using ansotropic diffusion.
+# IEEE Transactions on Pattern Analysis and Machine Intelligence,
+# 12(7):629-639, July 1990.
+# https://dx.doi.org/10.1109%2F34.56205
+#
+# Credits:
+# [1] Peter Kovesi
+# pk@peterkovesi.com
+# MATLAB and Octave Functions for Computer Vision and Image Processing
+# http://www.peterkovesi.com/matlabfns/Spatial/anisodiff.m
+# --
+# June 2000 original version
+# March 2002 corrected diffusion eqn No 2.
+# [2] Daniel Lopes
+# Anisotropic Diffusion (Perona & Malik)
+# http://www.mathworks.com/matlabcentral/fileexchange/14995-anisotropic-diffusion--perona---malik-
+#
+#
+# Aaron LI <aaronly.me@gmail.com>
+# 2015/07/17
+#
+
+include("calc_k_percentile.jl");
+
+function anisodiff(img, niter, k=calc_k_percentile, lambda=0.25, option=1)
+ diff = float(img)
+ rows, cols = size(diff)
+
+ for i = 1:niter
+ println("anisodiff - iteration: ", i)
+
+ # Construct diffl which is the same as diff but
+ # has an extra padding of zeros around it.
+ diffl = zeros(rows+2, cols+2)
+ diffl[2:rows+1, 2:cols+1] = diff
+
+ # North, South, East and West differences
+ deltaN = diffl[1:rows, 2:cols+1] - diff
+ deltaS = diffl[3:rows+2, 2:cols+1] - diff
+ deltaE = diffl[2:rows+1, 3:cols+2] - diff
+ deltaW = diffl[2:rows+1, 1:cols] - diff
+
+ # Calculate the kappa
+ if isa(k, Function)
+ kappa = k(diff)
+ else
+ kappa = k
+ end
+
+ println(" kappa: ", kappa)
+
+ # Conduction
+ if option == 1
+ cN = exp(-(deltaN/kappa).^2)
+ cS = exp(-(deltaS/kappa).^2)
+ cE = exp(-(deltaE/kappa).^2)
+ cW = exp(-(deltaW/kappa).^2)
+ elseif option == 2
+ cN = 1 ./ (1 + (deltaN/kappa).^2)
+ cS = 1 ./ (1 + (deltaS/kappa).^2)
+ cE = 1 ./ (1 + (deltaE/kappa).^2)
+ cW = 1 ./ (1 + (deltaW/kappa).^2)
+ end
+
+ diff += lambda * (cN.*deltaN + cS.*deltaS + cE.*deltaE + cW.*deltaW)
+ end
+
+ return diff
+end
+
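+# Usage sketch (img is a 2D array; kappa estimated via calc_k_percentile):
+#   img_diff = anisodiff(img, 10)
+# or with a fixed kappa and the second conduction function:
+#   img_diff = anisodiff(img, 10, 25, 0.25, 2)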
diff --git a/julia/calc_k_percentile.jl b/julia/calc_k_percentile.jl
new file mode 100644
index 0000000..36b15e1
--- /dev/null
+++ b/julia/calc_k_percentile.jl
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+#
+# Calculate the percentile of the gradient image, which
+# used as the 'kappa' parameter of the anisotropic diffusion.
+#
+# Credits:
+# [1] KAZE: nldiffusion_functions.cpp / compute_k_percentile()
+#
+# Aaron LI
+# 2015/07/20
+#
+
+include("scharr.jl");
+
+function calc_k_percentile(img, percent=0.7, nbins=300)
+ rows, cols = size(img)
+ # derivatives of the image
+ img_gx = scharr(img, 1, 0)
+ img_gy = scharr(img, 0, 1)
+ img_modg = sqrt(img_gx.^2 + img_gy.^2)
+ # histogram
+ hmax = maximum(img_modg)
+ hist_e, hist_counts = hist(reshape(img_modg, length(img_modg)), nbins)
+ hist_cum = cumsum(hist_counts)
+ # find the percent of the histogram percentile
+ npoints = sum(img_modg .> 0.0)
+ nthreshold = npoints * percent
+ k = sum(hist_cum .<= nthreshold)
+ kperc = (k == length(hist_cum)) ? 0.03 : (hmax * k / nbins)
+ return kperc
+end
+
diff --git a/julia/forcefield.jl b/julia/forcefield.jl
new file mode 100644
index 0000000..bf2c236
--- /dev/null
+++ b/julia/forcefield.jl
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+#
+# Force field transform with specified size of mask.
+#
+# Aaron LI
+# 2015/07/19
+#
+
+# Make the specified sized force field mask.
+# NOTE: the number of rows and cols must be odd.
+function ff_mask(rows=5, cols=5)
+ rows % 2 == 1 || error("rows must be odd number")
+ cols % 2 == 1 || error("cols must be odd number")
+ mask = complex(zeros(rows, cols))
+ for r = range(-div(rows, 2), rows)
+ for c = range(-div(cols, 2), cols)
+ i, j = r + div(rows+1, 2), c + div(cols+1, 2)
+ #@printf("(r,c) = (%d,%d); (i,j) = (%d,%d)\n", r, c, i, j)
+ d = c + r*im
+ if abs(d) < 1e-8
+ mask[i, j] = 0.0
+ else
+ mask[i, j] = d / abs(d)^3
+ end
+ end
+ end
+ return mask / sum(abs(mask))
+end
+
+
+# Padding image by specified number of rows and cols.
+# Default padding mode: mirror
+function pad_image(img, pad_rows, pad_cols, mode="mirror")
+ rows, cols = size(img)
+ rows_new, cols_new = rows + 2*pad_rows, cols + 2*pad_cols
+ img_pad = zeros(rows_new, cols_new)
+ img_pad[(pad_rows+1):(pad_rows+rows), (pad_cols+1):(pad_cols+cols)] = img
+ for r = 1:rows_new
+ for c = 1:cols_new
+ if mode == "mirror"
+ if r <= pad_rows
+ r_mirror = 2*(pad_rows+1) - r
+ elseif r <= pad_rows+rows
+ r_mirror = r
+ else
+ r_mirror = 2*(pad_rows+rows) - r
+ end
+ if c <= pad_cols
+ c_mirror = 2*(pad_cols+1) - c
+ elseif c <= pad_cols+cols
+ c_mirror = c
+ else
+ c_mirror = 2*(pad_cols+cols) - c
+ end
+ if (r_mirror, c_mirror) != (r, c)
+ #@printf("(%d,%d) <= (%d,%d)\n", r, c, r_mirror, c_mirror)
+ img_pad[r, c] = img_pad[r_mirror, c_mirror]
+ end
+ else
+ error("mode not supported")
+ end
+ end
+ end
+ return img_pad
+end
+
+
+# Perform force field transform for the image.
+function ff_transform(img, mask, mode="mirror")
+ rows, cols = size(img)
+ mask_rows, mask_cols = size(mask)
+ pad_rows, pad_cols = div(mask_rows, 2), div(mask_cols, 2)
+ img_pad = pad_image(img, pad_rows, pad_cols)
+ # result images
+ ff_amplitudes = zeros(rows, cols)
+ ff_angles = zeros(rows, cols)
+ # calculate transformed values
+ for r = (pad_rows+1):(pad_rows+rows)
+ for c = (pad_cols+1):(pad_cols+cols)
+ force = sum(img_pad[r, c] * img_pad[(r-pad_rows):(r+pad_rows), (c-pad_cols):(c+pad_cols)] .* mask)
+ ff_amplitudes[r-pad_rows, c-pad_cols] = abs(force)
+ ff_angles[r-pad_rows, c-pad_cols] = angle(force)
+ end
+ end
+ return ff_amplitudes, ff_angles
+end
+
diff --git a/julia/ndgrid.jl b/julia/ndgrid.jl
new file mode 100644
index 0000000..688a246
--- /dev/null
+++ b/julia/ndgrid.jl
@@ -0,0 +1,52 @@
+# This file is a part of Julia. License is MIT: http://julialang.org/license
+
+ndgrid(v::AbstractVector) = copy(v)
+
+function ndgrid{T}(v1::AbstractVector{T}, v2::AbstractVector{T})
+ m, n = length(v1), length(v2)
+ v1 = reshape(v1, m, 1)
+ v2 = reshape(v2, 1, n)
+ (repmat(v1, 1, n), repmat(v2, m, 1))
+end
+
+function ndgrid_fill(a, v, s, snext)
+ for j = 1:length(a)
+ a[j] = v[div(rem(j-1, snext), s)+1]
+ end
+end
+
+function ndgrid{T}(vs::AbstractVector{T}...)
+ n = length(vs)
+ sz = map(length, vs)
+ out = ntuple(i->Array(T, sz), n)
+ s = 1
+ for i=1:n
+ a = out[i]::Array
+ v = vs[i]
+ snext = s*size(a,i)
+ ndgrid_fill(a, v, s, snext)
+ s = snext
+ end
+ out
+end
+
+meshgrid(v::AbstractVector) = meshgrid(v, v)
+
+function meshgrid{T}(vx::AbstractVector{T}, vy::AbstractVector{T})
+ m, n = length(vy), length(vx)
+ vx = reshape(vx, 1, n)
+ vy = reshape(vy, m, 1)
+ (repmat(vx, m, 1), repmat(vy, 1, n))
+end
+
+function meshgrid{T}(vx::AbstractVector{T}, vy::AbstractVector{T},
+ vz::AbstractVector{T})
+ m, n, o = length(vy), length(vx), length(vz)
+ vx = reshape(vx, 1, n, 1)
+ vy = reshape(vy, m, 1, 1)
+ vz = reshape(vz, 1, 1, o)
+ om = ones(Int, m)
+ on = ones(Int, n)
+ oo = ones(Int, o)
+ (vx[om, :, oo], vy[:, on, oo], vz[om, on, :])
+end
diff --git a/julia/scharr.jl b/julia/scharr.jl
new file mode 100644
index 0000000..02daeb6
--- /dev/null
+++ b/julia/scharr.jl
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+#
+# Calculate the derivatives of an image using the Scharr operator
+# of kernel size 3x3.
+#
+# References:
+# [1] https://en.wikipedia.org/wiki/Sobel_operator
+# [2] http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html
+#
+# Aaron LI
+# 2015/07/20
+#
+
+# Calculate the derivatives of the image using the Scharr operator
+# img - input image
+# dx - order of the derivative x
+# dy - order of the derivative y
+function scharr(img, dx, dy)
+ rows, cols = size(img)
+ img_d = float(img)
+ (isa(dx, Int) && dx >= 0) || error("dx should be an integer >= 0")
+ (isa(dy, Int) && dy >= 0) || error("dy should be an integer >= 0")
+ # Scharr operator
+ Gy = [-3.0 -10.0 -3.0; 0.0 0.0 0.0; 3.0 10.0 3.0];
+ Gx = Gy'
+ # calculate the derivatives using convolution
+ for i = 1:dx
+ img_d = conv2(img_d, Gx)
+ end
+ for i = 1:dy
+ img_d = conv2(img_d, Gy)
+ end
+ # FIXME: 'conv2' will increase the image size
+ rows_d, cols_d = size(img_d)
+ return img_d[(div(rows_d-rows, 2)+1):(div(rows_d-rows, 2)+rows), (div(cols_d-cols, 2)+1):(div(cols_d-cols, 2)+cols)]
+end
+
diff --git a/matlab/radialpsd.m b/matlab/radialpsd.m
new file mode 100644
index 0000000..cff4f9e
--- /dev/null
+++ b/matlab/radialpsd.m
@@ -0,0 +1,83 @@
+%
+% radialpsd - to calculate the radial power spectrum density
+% of the given 2d image
+%
+% Credits:
+% [1] Evan Ruzanski
+% Radially averaged power spectrum of 2D real-valued matrix
+% https://www.mathworks.com/matlabcentral/fileexchange/23636-radially-averaged-power-spectrum-of-2d-real-valued-matrix
+%
+% Arguments:
+% img - input 2d image (grayscale)
+% step - radius step between each consecutive two circles
+%
+% Return:
+% psd - vector contains the power at each frequency
+% psd_std - vector of the corresponding standard deviation
+%
+
+function [psd, psd_std] = radialpsd(img, step)
+ [N M] = size(img)
+
+ %% Compute power spectrum
+ imgf = fftshift(fft2(img))
+ % Normalize by image size
+ imgfp = (abs(imgf) / (N*M)) .^ 2
+
+ %% Adjust PSD size: padding to make a square matrix
+ dimDiff = abs(N-M)
+ dimMax = max(N, M)
+ % To make square matrix
+ if N > M
+ % More rows than columns
+ if ~mod(dimDiff, 2)
+ % Even difference
+ % Pad columns to match dimension
+ imgfp = [NaN(N,dimDiff/2) imgfp NaN(N,dimDiff/2)]
+ else
+ % Odd difference
+ imgfp = [NaN(N,floor(dimDiff/2)) imgfp NaN(N,floor(dimDiff/2)+1)]
+ end
+ elseif N < M
+ % More columns than rows
+ if ~mod(dimDiff, 2)
+ % Even difference
+ % Pad rows to match dimensions
+ imgfp = [NaN(dimDiff/2,M); imgfp; NaN(dimDiff/2,M)]
+ else
+ % Pad rows to match dimensions
+ imgfp = [NaN(floor(dimDiff/2),M); imgfp; NaN(floor(dimDiff/2)+1,M)]
+ end
+ end
+
+ % Only consider one half of spectrum (due to symmetry)
+ halfDim = floor(dimMax/2) + 1
+
+ %% Compute radially average power spectrum
+ % Make Cartesian grid
+ [X Y] = meshgrid(-dimMax/2:dimMax/2-1, -dimMax/2:dimMax/2-1)
+ % Convert to polar coordinate axes
+ [theta rho] = cart2pol(X, Y)
+ rho = round(rho)
+ i = cell(floor(dimMax/2)+1, 1)
+ for r = 0:floor(dimMax/2)
+ i{r+1} = find(rho == r)
+ end
+ % calculate the radial mean power and its standard deviation
+ Pf = zeros(2, floor(dimMax/2)+1)
+ for r = 0:floor(dimMax/2)
+ Pf(1, r+1) = nanmean(imgfp(i{r+1}))
+ Pf(2, r+1) = nanstd(imgfp(i{r+1}))
+ end
+
+ % adapt to the given step size
+ psd = zeros(1, floor(size(Pf, 2) / step))
+ psd_std = zeros(size(psd))
+ for k = 1:length(psd)
+ psd(k) = mean(Pf(1, (k*step-step+1):(k*step)))
+ % approximately calculate the merged standard deviation
+ psd_std(k) = sqrt(mean(Pf(2, (k*step-step+1):(k*step)) .^ 2))
+ end
+end
+
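+% Example (img is a 2D grayscale image):
+%   [psd, psd_std] = radialpsd(img, 2)
+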
+% vim: set ts=8 sw=4 tw=0 fenc=utf-8 ft=matlab: %
diff --git a/r/chisq.R b/r/chisq.R
new file mode 100644
index 0000000..41f851f
--- /dev/null
+++ b/r/chisq.R
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+#
+# Calculate the chi-squared values between the given data and model values.
+#
+
+calc.chisq <- function(value, error=NULL, model=NULL) {
+ if (is.data.frame(value)) {
+ df <- value
+ value <- df$value
+ error <- df$error
+ model <- df$model
+ }
+ chisq <- (value - model)^2
+ if (! is.null(error)) {
+ weights <- error ^ (-2)
+ chisq <- chisq * weights
+ }
+ return(sum(chisq))
+}
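+# Example: two data points, both with measurement error 0.1:
+#   calc.chisq(value=c(1.2, 0.8), error=c(0.1, 0.1), model=c(1.0, 1.0))  # => 8
+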
+
+# vim: set ts=8 sw=4 tw=0 fenc=utf-8 ft=r: #
diff --git a/r/fillpois.R b/r/fillpois.R
new file mode 100644
index 0000000..be590f8
--- /dev/null
+++ b/r/fillpois.R
@@ -0,0 +1,130 @@
+# -*- coding: utf-8 -*-
+#
+# Identify the abnormal pixels (with relatively larger values) in the
+# given X-ray count image, and replace their values with random Poisson
+# values whose parameter lambda is determined by the neighboring pixels.
+#
+#
+# Aaron LI
+# 2015/09/01
+# Updated: 2015/09/02
+#
+
+# Fill a vector of a row/column of X-ray image with Poisson distribution.
+#
+# The parameter lambda of the Poisson distribution is determined by the
+# mean value of the left n (default: 3) elements.
+#
+# If the value of one element is greater than or equal to (>=)
+# qpois(prob, lambda), then its value is replaced with rpois(1, lambda).
+#
+# Arguments:
+# vec - input data vector
+# n - number of elements used to calculate the lambda (default: 3)
+# prob - quantile probability (default: 95%)
+#
+# Return:
+# a vector of the same length with abnormal values replaced
+fill.pois.vec <- function(vec, n=3, prob=0.95) {
+ # Select the given index of element from the vector
+ # Arguments:
+ # bc - boundary condition:
+ # + "cyclic": vec[0, -1] := vec[length(vec), length(vec)-1]
+ # + "symmetric": vec[0, -1] := vec[1, 2]
+ elem <- function(vec, index, bc="cyclic") {
+ if (index <= 0) {
+ if (bc == "cyclic") {
+ index <- length(vec) + index
+ } else if (bc == "symmetric") {
+ index <- 1 - index
+ } else {
+ stop(paste("Invalid boundary condition:", bc, "\n"))
+ }
+ }
+ return(vec[index])
+ }
+ # Calculate the mean value of left n elements for the given element.
+ mean.left <- function(vec, index, n=3, bc="cyclic") {
+ elements <- get(class(vec))(n)
+ for (i in 1:n) {
+ elements[i] <- elem(vec, index-i)
+ }
+ return(mean(elements))
+ }
+ #
+ vec.filled <- vec
+ for (i in 1:length(vec)) {
+ lambda <- mean.left(vec, i, n)
+ if (vec[i] >= qpois(prob, lambda)) {
+ vec.filled[i] <- rpois(1, lambda)
+ }
+ }
+ return(vec.filled)
+}
+
+
+# Fill a matrix of the X-ray image with Poisson distribution by row or column.
+#
+# For more details, see 'fill.pois.vec()'.
+#
+# Arguments:
+# mat - input image matrix
+# byrow - whether to fill the matrix by row (default: by column)
+# n - number of elements used to calculate the lambda (default: 3)
+# prob - quantile probability (default: 95%)
+#
+# Return:
+# a matrix of the same size with abnormal values replaced
+fill.pois.mat <- function(mat, byrow=FALSE, n=3, prob=0.95) {
+ mat.filled <- mat
+ if (byrow) {
+ # process by row
+ rows <- nrow(mat)
+ for (r in 1:rows) {
+ vec <- mat[r, ]
+ vec.filled <- fill.pois.vec(vec, n, prob)
+ mat.filled[r, ] <- vec.filled
+ }
+ } else {
+ # process by column
+ cols <- ncol(mat)
+ for (c in 1:cols) {
+ vec <- mat[, c]
+ vec.filled <- fill.pois.vec(vec, n, prob)
+ mat.filled[, c] <- vec.filled
+ }
+ }
+ return(mat.filled)
+}
+
+
+# Identify the abnormal pixels (with relatively larger values) in the
+# given X-ray count image, and replace their values with random Poisson
+# values whose parameter lambda is determined by the neighboring pixels.
+#
+# The refilled image is the average of the two images, which are the
+# original image processed with 'fill.pois.mat()' by row and column,
+# respectively.
+#
+# TODO: to verify???
+# The two-step procedure is employed to avoid the grid-like pattern/structure
+# in the refilled image.
+#
+# For more details, see 'fill.pois.vec()'.
+#
+# Arguments:
+# img - input image (a matrix)
+# n - number of elements used to calculate the lambda (default: 3)
+# prob - quantile probability (default: 95%)
+#
+# Return:
+# a matrix of the same size with abnormal values replaced
+fill.pois.img <- function(img, n=3, prob=0.95) {
+ img.fillbycol <- fill.pois.mat(img, byrow=FALSE, n=n, prob=prob)
+ img.fillbyrow <- fill.pois.mat(img, byrow=TRUE, n=n, prob=prob)
+ img.filled <- (img.fillbycol + img.fillbyrow) / 2
+ return(img.filled)
+}
+
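+# Usage sketch (img is a counts matrix, e.g. read from a FITS image):
+#   img.filled <- fill.pois.img(img, n=3, prob=0.95)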
+
+# vim: set ts=8 sw=4 tw=0 fenc=utf-8 ft=r: #
diff --git a/r/fitdistrplus-example.R b/r/fitdistrplus-example.R
new file mode 100644
index 0000000..b5b1a57
--- /dev/null
+++ b/r/fitdistrplus-example.R
@@ -0,0 +1,54 @@
+n <- 50
+m <- 50
+set.seed(1)
+mu <- -0.4
+sig <- 0.12
+x <- matrix(data=rlnorm(n*m, mu, sig), nrow=m)
+
+library(fitdistrplus)
+## Fit a log-normal distribution to the 50 random data set
+f <- apply(x, 2, fitdist, "lnorm")
+
+## Plot the results
+for(i in 1:n)
+plot(f[[i]])
+
+## Save plot in an animated GIF-file
+library(animation)
+saveGIF({for(i in 1:n) plot(f[[i]])})
+
+apply((sapply(f, "[[", "estimate")),1, summary)
+# meanlog sdlog
+# Min. -0.4347 0.09876
+# 1st Qu. -0.4140 0.11480
+# Median -0.4010 0.12110
+# Mean -0.4011 0.12270
+# 3rd Qu. -0.3899 0.12950
+# Max. -0.3522 0.14780
+
+
+## How much variance can we expect in the mean and std?
+## Expeted mean
+ExpectedMean <- function(mu, sig) exp(mu+ sig^2/2)
+## Expected std
+ExpectedStd <- function(mu, sig) sqrt((exp(sig^2)-1)*exp(2*mu + sig^2))
+
+summary(apply(sapply(f, "[[", "estimate"), 2, function(x) ExpectedMean(x[1], x[2])))
+# Min. 1st Qu. Median Mean 3rd Qu. Max.
+# 0.6529 0.6665 0.6747 0.6748 0.6819 0.7087
+summary(apply(sapply(f, "[[", "estimate"), 2, function(x) ExpectedStd(x[1], x[2])))
+# Min. 1st Qu. Median Mean 3rd Qu. Max.
+# 0.06604 0.07880 0.08212 0.08316 0.08794 0.10170
+
+## Let's look at the goodness of fit statistics to get an
+## idea how much variance we can expect there:
+gof.ln <- lapply(f, gofstat)
+gof.test <- lapply(gof.ln, function(x) data.frame(x[c("chisqpvalue", "cvm", "ad", "ks")]))
+apply(do.call("rbind", gof.test), 2, summary)
+# chisqpvalue cvm ad ks
+# Min. 0.0002673 0.02117 0.1537 0.05438
+# 1st Qu. 0.1394000 0.03755 0.2708 0.07488
+# Median 0.3578000 0.04841 0.3216 0.08054
+# Mean 0.3814000 0.05489 0.3564 0.08431
+# 3rd Qu. 0.6409000 0.06913 0.4358 0.09436
+# Max. 0.9245000 0.13220 0.7395 0.12570
\ No newline at end of file
diff --git a/r/forceFieldTransform.R b/r/forceFieldTransform.R
new file mode 100644
index 0000000..92310c1
--- /dev/null
+++ b/r/forceFieldTransform.R
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+#
+# Calculate the "force field transform" of the image, using the
+# specified *cell* size.
+#
+# The image is padded with the mirrored boundary condition.
+#
+# NOTE:TODO:
+# The transformation output strengths image is NOT normalized!
+#
+#
+# Credit:
+# [1] TODO:
+# Hurley et al., 2002, 2005
+#
+#
+# Aaron LI
+# 2015/08/28
+#
+
+
+# The attractive force between two points on the image.
+# NOTE: the coefficient is ignored
+#
+# Arguments:
+# p0, p1 - (r, c, value), the row and column number of the point position,
+# and the value of that point
+#
+# Return:
+# the force vector (f_r, f_c):
+# 'f_r': force along the row direction, positive goes downside
+# 'f_c': force along the column direction, positive goes to the right
+# Note that this is the force that 'p1' act on 'p0', and is directed
+# to point 'p1'.
+p2p.force <- function(p0, p1) {
+ r0 = p0[1]
+ c0 = p0[2]
+ r1 = p1[1]
+ c1 = p1[2]
+ f_r = p0[3]*p1[3] * (r1-r0) / ((r1-r0)^2 + (c1-c0)^2)^1.5
+ f_c = p0[3]*p1[3] * (c1-c0) / ((r1-r0)^2 + (c1-c0)^2)^1.5
+ return(c(f_r, f_c))
+}
+
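+# Example: two unit-value pixels one column apart:
+#   p2p.force(c(1, 1, 1), c(1, 2, 1))  # => c(0, 1)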
+
+# vim: set ts=8 sw=4 tw=0 fenc=utf-8 ft=r: #
diff --git a/r/kldiv.R b/r/kldiv.R
new file mode 100644
index 0000000..a767038
--- /dev/null
+++ b/r/kldiv.R
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+#
+# Kullback-Leibler or Jensen-Shannon divergence between two distributions
+#
+# The Kullback-Leibler divergence is given by:
+# D_{KL}(P(x), Q(x)) = sum[ P(x) * log(P(x) / Q(x)) ]
+# where P(x) is the underlying true distribution, and Q(x) the approximating
+# distribution. Thus KL divergence measures the information lost when Q is
+# used to approximate P.
+#
+# The Jensen-Shannon divergence is given by:
+# D_{JS}(P, Q) = 0.5 * D_{KL}(P, M) + 0.5 * D_{KL}(Q, M); M = (P+Q)/2
+# This is a symmetrised divergence; the symmetrised KL variant ("klsym"
+# below) equals 1/2 of the so-called Jeffrey divergence.
+#
+# Credits:
+# [1] Wikipedia - Kullback-Leibler divergence
+# https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
+# [2] David Fass, KLDIV
+# http://www.mathworks.com/matlabcentral/fileexchange/13089-kldiv/content//kldiv.m
+#
+# Aaron LI
+# 2015/09/04
+#
+
+
+# Calculate the entropy of the probability mass distribution.
+# The zeros are ignored.
+#
+# Arguments:
+# x - probability mass distribution
+#
+# Return:
+# entropy in unit "bits"
+#
+calc.entropy <- function(x) {
+ x.gt0 <- x[x>0]
+ return(sum(-x.gt0 * log2(x.gt0)))
+}
+
+
+# Calculate the KL divergence of distribution P from Q, or the JS divergence
+# between the P and Q distributions.
+#
+# TODO:
+# * to add other methods to deal with zero probabilities:
+# - add eps to p.m.f and renormalize
+# - Bayesian prior
+# - smoothing
+#
+# Credits:
+# [1] Calculate the Kullback-Leibler Divergence in practice?
+# http://stats.stackexchange.com/questions/97938/calculate-the-kullback-leibler-divergence-in-practice
+# [2] How to compute KL-divergence when PMF contains 0s?
+# http://mathoverflow.net/questions/72668/how-to-compute-kl-divergence-when-pmf-contains-0s
+#
+# Arguments:
+# p - probabilities representing the distribution P (underlying true)
+# q - probabilities representing the distribution Q (approximation)
+# type - which type of divergence to be calculated
+# + "kl": (default) Kullback-Leibler divergence
+# + "klsym": symmetric variant of the Kullback-Leibler divergence,
+# which is given by (KL(p, q) + KL(q, p))/2
+# + "js": Jensen-Shannon divergence
+# zeros - how to deal with the zeros in each distribution probabilities
+# + "ignore": just ignore the data points with probability of zero
+#
+# Note that the vectors p and q must have the same length, and the
+# sum of probabilities p and q must be 1 +/- 1e-5
+#
+# Return:
+# calculate divergence value in unit "bits"
+#
+kldiv <- function(p, q, type="kl", zeros="ignore") {
+ # check length of vectors
+ stopifnot(length(p) == length(q))
+ # validate probabilities
+ eps_prob <- 1e-5
+ stopifnot(abs(sum(p) - 1) <= eps_prob, abs(sum(q) - 1) <= eps_prob)
+ # how to deal with zero probabilities
+ if (zeros == "ignore") {
+ # just ignore the zeros in probabilities
+ nonzeros <- (p > 0) & (q > 0)
+ p <- p[nonzeros]
+ q <- q[nonzeros]
+ } else {
+ stop(paste("Unsupported parameter value zeros=", zeros, "\n", sep=""))
+ }
+ # check divergence type
+ if (type == "kl") {
+ # Kullback-Leibler divergence
+ div <- sum(p * (log2(p) - log2(q)))
+ } else if (type == "klsym") {
+ # symmetric variant KL divergence
+ div <- 0.5 * (sum(p * (log2(p) - log2(q))) +
+ sum(q * (log2(q) - log2(p))))
+ } else if (type == "js") {
+ # Jensen-Shannon divergence
+ m <- (p + q) / 2
+ div <- 0.5 * (sum(p * (log2(p) - log2(m))) +
+ sum(q * (log2(q) - log2(m))))
+ } else {
+ stop(paste("Unsupported parameter value type=", type, "\n", sep=""))
+ }
+ return(div)
+}
+
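+# Example: approximating a fair coin P = (0.5, 0.5) by Q = (0.9, 0.1):
+#   kldiv(c(0.5, 0.5), c(0.9, 0.1))  # ~0.737 bits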
+
+# Estimate the probability mass distribution for the observation data,
+# using "density()".
+# The range of output coordinates of points is set to be:
+# from: min(x) - cut*bw
+# to: max(x) + cut*bw
+# And the probability mass distribution is normalized.
+#
+# Arguments:
+# x - input observation data
+# n - number of equally spaced points at which the probability mass is
+# to be estimated.
+# bw - bandwidth to be used
+# kernel - the smoothing kernel
+# from - coordinate of the left-most point
+# to - coordinate of the right-most point
+# cut - c(left, right). Number of bandwidths beyond the left and right
+# extremes of the input data.
+# This allows the estimated density to drop to approximately zero
+# at the extremes.
+# If "from" and "to" specified, then "cut" is ignored.
+#
+# Returns:
+# list with following components:
+# x - the coordinates of the points where probability mass estimated
+# y - the estimated probability mass
+# bw - bandwidth used
+# kernel - kernel used
+# n - sample size
+# cut - left and right cut used
+# from - coordinate of the left-most point used
+# to - coordinate of the right-most point used
+#
+estimate.prob.mass <- function(x, bw="nrd0", kernel="gaussian", n=512,
+ from=NULL, to=NULL, cut=c(3,3)) {
+ data <- x[!is.na(x)]
+ # calculate the bandwidth
+ bw <- get(paste("bw.", bw, sep=""))(data)
+ # determine the estimation range
+ if (is.null(from)) {
+ from <- min(data) - cut[1] * bw
+ }
+ if (is.null(to)) {
+ to <- max(data) + cut[2] * bw
+ }
+ # estimate with "density()"
+ d <- density(data, bw=bw, kernel=kernel, n=n, from=from, to=to)
+ # renormalize the probability mass distribution
+ pmass <- d$y / sum(d$y)
+ prob.mass <- list(x=d$x, y=pmass, bw=bw, kernel=kernel,
+ n=n, from=from, to=to, cut=cut)
+ return(prob.mass)
+}
+
+
+# Estimate the probability mass distribution for the source and corresponding
+# background data using 'estimate.prob.mass()'.
+#
+# The coordinates at which the probability masses are estimated are the same
+# for the source and corresponding background probability mass distributions.
+# Therefore we can calculate the KL divergence between these two distributions.
+#
+# Argument:
+# srcdata - raw counts data drawn from the source region
+# bkgdata - raw counts data drawn from the background region
+#
+# Return:
+# data.frame with the following components:
+# x - the coordinates of the points where probability mass estimated
+# src - the estimated probability masses of the source data
+# bkg - the estimated probability masses of the background data
+#
+pm.src.bkg <- function(srcdata, bkgdata) {
+ # compare the data ranges
+ if (max(srcdata) > max(bkgdata)) {
+ pm.src <- estimate.prob.mass(srcdata)
+ from <- pm.src$from
+ to <- pm.src$to
+ pm.bkg <- estimate.prob.mass(bkgdata, from=from, to=to)
+ } else {
+ pm.bkg <- estimate.prob.mass(bkgdata)
+ from <- pm.bkg$from
+ to <- pm.bkg$to
+ pm.src <- estimate.prob.mass(srcdata, from=from, to=to)
+ }
+ df <- data.frame(x=pm.src$x, src=pm.src$y, bkg=pm.bkg$y)
+ return(df)
+}
+
+
+# Calculate the entropies and KL/JS divergences of the source and background
+# probability mass distribution group.
+#
+# Arguments:
+# pmdf - data.frame of the probability mass distribution
+# comp - components to be calculated
+# + "entropy": entropy of the source and background
+# + "kl": KL divergences from source to background and vice versa
+# + "klsym": symmetric variant of KL divergence
+# + "js": JS divergence
+#
+# Return:
+# list with following components:
+# entropy.src - entropy of the source distribution
+# entropy.bkg - entropy of the background distribution
+# kl.src2bkg - KL divergence from source to background
+# kl.bkg2src - KL divergence from background to source
+# klsym - symmetric variant KL divergence
+# js - JS divergence
+info.src.bkg <- function(pmdf, comp=c("entropy", "kl", "klsym", "js")) {
+ pm.src <- pmdf$src
+ pm.bkg <- pmdf$bkg
+ entropy.src <- NULL
+ entropy.bkg <- NULL
+ kl.src2bkg <- NULL
+ kl.bkg2src <- NULL
+ klsym <- NULL
+ js <- NULL
+ if ("entropy" %in% comp) {
+ entropy.src <- calc.entropy(pm.src)
+ entropy.bkg <- calc.entropy(pm.bkg)
+ }
+ if ("kl" %in% comp) {
+ kl.src2bkg <- kldiv(pm.src, pm.bkg, type="kl")
+ kl.bkg2src <- kldiv(pm.bkg, pm.src, type="kl")
+ }
+ if ("klsym" %in% comp) {
+ klsym <- kldiv(pm.src, pm.bkg, type="klsym")
+ }
+ if ("js" %in% comp) {
+ js <- kldiv(pm.src, pm.bkg, type="js")
+ }
+ return(list(entropy.src=entropy.src, entropy.bkg=entropy.bkg,
+ kl.src2bkg=kl.src2bkg, kl.bkg2src=kl.bkg2src,
+ klsym=klsym, js=js))
+}
+
+
+# Calculate the entropies and KL/JS divergences of the source density
+# histogram with respect to the corresponding background data which
+# drawn from the estimated Poisson mass distribution.
+#
+# Arguments:
+# src - raw counts data of the source region
+# comp - components to be calculated
+# + "entropy": entropy of the source and background
+# + "kl": KL divergences from source to background and vice versa
+# + "klsym": symmetric variant of KL divergence
+# + "js": JS divergence
+#
+# Return:
+# list with following components:
+# entropy.src - entropy of the source distribution
+# entropy.bkg - entropy of the background distribution
+# kl.src2bkg - KL divergence from source to background
+# kl.bkg2src - KL divergence from background to source
+# klsym - symmetric variant KL divergence
+# js - JS divergence
+#
+info.src.pois <- function(src, comp=c("entropy", "kl", "klsym", "js")) {
+ # make the density histogram of the source counts data
+ hist.src <- hist(src, breaks=(min(src):(max(src)+1)-0.5), plot=FALSE)
+ x <- hist.src$mids
+ pm.src <- hist.src$density
+ # calculate the corresponding theoretical Poisson density/mass distribution
+ # as the estimated background
+ lambda <- mean(src)
+ pm.pois <- dpois(x, lambda)
+ pm.pois <- pm.pois / sum(pm.pois)
+ # calculate the entropy, KL/JS divergences
+ entropy.src <- NULL
+ entropy.bkg <- NULL
+ kl.src2bkg <- NULL
+ kl.bkg2src <- NULL
+ klsym <- NULL
+ js <- NULL
+ if ("entropy" %in% comp) {
+ entropy.src <- calc.entropy(pm.src)
+ entropy.bkg <- calc.entropy(pm.pois)
+ }
+ if ("kl" %in% comp) {
+ kl.src2bkg <- kldiv(pm.src, pm.pois, type="kl")
+ kl.bkg2src <- kldiv(pm.pois, pm.src, type="kl")
+ }
+ if ("klsym" %in% comp) {
+ klsym <- kldiv(pm.src, pm.pois, type="klsym")
+ }
+ if ("js" %in% comp) {
+ js <- kldiv(pm.src, pm.pois, type="js")
+ }
+ return(list(entropy.src=entropy.src, entropy.bkg=entropy.bkg,
+ kl.src2bkg=kl.src2bkg, kl.bkg2src=kl.bkg2src,
+ klsym=klsym, js=js))
+}
+
+
+# Calculate the information (e.g., entropy, divergences) for each group of
+# region data.
+# If the background data are not provided, then the background is estimated
+# with a Poisson density/mass distribution.
+info.reglist <- function(srcdatalist, bkgdatalist=NULL) {
+ if (is.null(bkgdatalist)) {
+ infofunc <- "info.src.pois"
+ } else {
+ infofunc <- "info.src.bkg"
+ stopifnot(length(srcdatalist) == length(bkgdatalist))
+ }
+ l <- length(srcdatalist)
+ infodf <- data.frame(entropy.src=numeric(l), entropy.bkg=numeric(l),
+ kl.src2bkg=numeric(l), kl.bkg2src=numeric(l),
+ klsym=numeric(l), js=numeric(l))
+ for (i in 1:length(srcdatalist)) {
+ #cat(i, "\n")
+ if (is.null(bkgdatalist)) {
+ if (sum(srcdatalist[[i]]) == 0) {
+ # srcdata all zeros
+ cat(i, ": WARNING: srcdata are all zeros!\n")
+ info <- list(entropy.src=NA, entropy.bkg=NA,
+ kl.src2bkg=NA, kl.bkg2src=NA,
+ klsym=NA, js=NA)
+ } else {
+ info <- get(infofunc)(srcdatalist[[i]])
+ }
+ } else {
+ if (sum(srcdatalist[[i]]) == 0 || sum(bkgdatalist[[i]]) == 0) {
+ # srcdata / bkgdata all zeros
+ cat(i, ": WARNING: srcdata/bkgdata are all zeros!\n")
+ info <- list(entropy.src=NA, entropy.bkg=NA,
+ kl.src2bkg=NA, kl.bkg2src=NA,
+ klsym=NA, js=NA)
+ } else {
+ pmdf <- pm.src.bkg(srcdatalist[[i]], bkgdatalist[[i]])
+ info <- get(infofunc)(pmdf)
+ }
+ }
+ infodf[i, ] <- info
+ }
+ return(infodf)
+}
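+
+# Example (a minimal usage sketch with simulated counts for two regions;
+# without 'bkgdatalist' the backgrounds are estimated by Poisson
+# distributions):
+#   srcs <- list(rpois(500, 5), rpois(500, 9))
+#   info.reglist(srcs)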
+
+
+# vim: set ts=8 sw=4 tw=0 fenc=utf-8 ft=r: #
diff --git a/r/lsos.R b/r/lsos.R
new file mode 100644
index 0000000..16a3e7f
--- /dev/null
+++ b/r/lsos.R
@@ -0,0 +1,45 @@
+# -*- encoding: utf-8 -*-
+
+# Tricks to manage the available memory in an R session
+# http://stackoverflow.com/q/1358003/4856091
+
+# improved list of objects
+.ls.objects <- function(pos=1, pattern, order.by,
+ decreasing=FALSE, pretty.size=FALSE,
+ head=FALSE, n=10) {
+ napply <- function(names, fn) {
+ sapply(names, function(x) fn(get(x, pos=pos)))
+ }
+ names <- ls(pos=pos, pattern=pattern)
+ obj.class <- napply(names, function(x) as.character(class(x))[1])
+ obj.mode <- napply(names, mode)
+ obj.type <- ifelse(is.na(obj.class), obj.mode, obj.class)
+ obj.size.bytes <- napply(names, object.size)
+ if (pretty.size) {
+ obj.size <- napply(names, function(x) {
+ format(object.size(x), units="auto")
+ })
+ } else {
+ obj.size <- obj.size.bytes
+ }
+ obj.dim <- t(napply(names, function(x) as.numeric(dim(x))[1:2]))
+ vec <- is.na(obj.dim)[, 1] & (obj.type != "function")
+ obj.dim[vec, 1] <- napply(names, length)[vec]
+ out <- data.frame(obj.type, obj.size, obj.dim)
+ names(out) <- c("Type", "Size", "Rows", "Columns")
+ if (! missing(order.by))
+ if (order.by == "Size") {
+ out <- out[order(obj.size.bytes, decreasing=decreasing), ]
+ } else {
+ out <- out[order(out[[order.by]], decreasing=decreasing), ]
+ }
+ if (head)
+ out <- head(out, n)
+ out
+}
+# shorthand
+lsobjs <- function(..., n=10) {
+ .ls.objects(..., order.by="Size", decreasing=TRUE,
+ pretty.size=TRUE, head=TRUE, n=n)
+}
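+
+# Example (a minimal usage sketch in the global workspace):
+#   big <- rnorm(1e6)
+#   lsobjs()       # the 10 largest objects, sorted by size
+#   lsobjs(n=5)    # only the top 5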
+
diff --git a/r/scaleSaliency.R b/r/scaleSaliency.R
new file mode 100644
index 0000000..80172bc
--- /dev/null
+++ b/r/scaleSaliency.R
@@ -0,0 +1,246 @@
+# -*- coding: utf-8 -*-
+#
+# Scale Saliency algorithm
+#
+# Reference:
+# [1] T. Kadir & M. Brady, Saliency, Scale and Image Description.
+# 2001, International Journal of Computer Vision, 45(2), 83-105
+#
+# Aaron LI
+# 2015/07/29
+#
+
+
+# Calculate Shannon entropy from histogram of probability densities.
+# Arguments:
+# phist - histogram of probability density
+# Return:
+# entropy value
+calcShannonEntropy <- function(phist) {
+ # Helper function to calculate the entropy
+ # Arguments:
+ # p - probability density of each bin
+ # Return:
+ # p * log(p) if p > 0, otherwise 0
+ plogp <- function(p) {
+ if (p < 1e-10) {
+ return(0.0)
+ } else {
+ return(p * log(p))
+ }
+ }
+ # calculate the entropy
+ entropy <- -sum(sapply(phist, plogp))
+ return(entropy)
+}
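+
+# Example (a quick sanity check):
+#   calcShannonEntropy(rep(0.25, 4))  # log(4) ~ 1.386, maximal for 4 bins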
+
+
+# Generate the template circular regions for each scale.
+# Arguments:
+# scale_min - minimum scale (radius of circle)
+# scale_max - maximum scale
+# Return:
+# list of matrix which represent the regions of interest (with value of TRUE)
+circleROI <- function(scale_min, scale_max) {
+ rows <- 2 * scale_max + 1
+ cols <- rows
+ rc <- (rows + 1) / 2 # central row
+ cc <- (cols + 1) / 2 # central col
+ roi <- list()
+ for (s in scale_min:scale_max) {
+ radius2 <- s^2
+ m <- matrix(0, nrow=rows, ncol=cols)
+ roi[[paste("scale", s, sep="")]] <-
+ ifelse(((row(m)-rc)^2 + (col(m)-cc)^2) <= radius2,
+ TRUE, FALSE)
+ }
+ return(roi)
+}
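+
+# Example (a minimal usage sketch):
+#   roi <- circleROI(2, 4)
+#   names(roi)        # "scale2" "scale3" "scale4"
+#   sum(roi$scale2)   # 13 pixels fall inside the radius-2 circle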
+
+
+# Calculate the scale saliencies for the 1D case: scalar image
+# Arguments:
+# img - input *scalar* image
+#   scale_min - minimum scale (radius of the circle, in pixels)
+# scale_max - maximum scale (NOTE: scale_max >= scale_min+2)
+# nbins - how many bins used for histogram
+# progressbar - whether to display the progress bar
+# Return:
+#   6-column data.frame containing the scale saliency results
+calcScaleSaliency1D <- function(img, scale_min, scale_max, nbins,
+ progressbar=TRUE) {
+ # check scale range first: must have at least 3 scales
+ stopifnot(scale_max >= scale_min+2)
+ # get number of rows and columns
+ rows <- nrow(img)
+ cols <- ncol(img)
+ # determine the saliency calculation region of the image
+ # FIXME: how to deal with the boundaries???
+ row_begin <- scale_max + 1
+ col_begin <- scale_max + 1
+ row_end <- rows - scale_max
+ col_end <- cols - scale_max
+ # templates of regions for each scale
+ roi <- circleROI(scale_min, scale_max)
+ # R data frame to store the saliency results
+ scaleSaliency <- data.frame(row=numeric(0), col=numeric(0),
+ scale=numeric(0), entropy=numeric(0),
+ disimilarity=numeric(0), saliency=numeric(0))
+ # determine the breakpoints for histogram
+ hist_breaks <- (0:nbins) * (max(img) - min(img))/nbins + min(img)
+ if (progressbar) {
+ # progress bar
+ pb <- txtProgressBar(min=row_begin, max=row_end, style=3)
+ }
+ for (ri in row_begin:row_end) {
+ if (progressbar) {
+ # update progress bar
+ setTxtProgressBar(pb, ri)
+ }
+ for (ci in col_begin:col_end) {
+ # filter out the required size of image block, which is
+ # used to calculate its histogram, entropy, etc.
+ imgROI <- img[(ri-scale_max):(ri+scale_max),
+ (ci-scale_max):(ci+scale_max)]
+ # vectors to store entropies and distances
+ entropy <- numeric(scale_max-scale_min+1)
+ distance <- numeric(scale_max-scale_min+1)
+ # initial probability density for scale of 's-1'
+ scaleHistPr0 <- rep(0, nbins)
+ for (s in scale_min:scale_max) {
+ scaleROI <- roi[[paste("scale", s, sep="")]]
+            # NOTE: do not use 'breaks=nbins', since that number is only
+            # a suggestion and the breakpoints would then be set to
+            # 'pretty' values.
+ scaleHist <- hist(imgROI[scaleROI],
+ breaks=hist_breaks, plot=FALSE)
+ scaleHistPr <- scaleHist$counts / sum(scaleHist$counts)
+ # calculate Shannon entropy
+ entropy[s-scale_min+1] <- calcShannonEntropy(scaleHistPr)
+ # FIXME: calculate distance of scales???
+ distance[s-scale_min+1] <- sum(abs(scaleHistPr-scaleHistPr0))
+ # save the probability density of current scale 's'
+ scaleHistPr0 <- scaleHistPr
+ }
+ # smooth the 'distance' vector to reduce the impacts of noise
+ distance1 <- c(distance[1], distance[1:(length(distance)-1)])
+ distance2 <- c(distance[2:length(distance)],
+ distance[length(distance)])
+ distance <- (distance1 + distance + distance2) / 3
+ # find the peaks of entropy, and the corresponding scales
+ peakScale <- c(FALSE,
+ ((entropy[2:(length(entropy)-1)] >
+ entropy[1:(length(entropy)-2)]) &
+ (entropy[2:(length(entropy)-1)] >
+ entropy[3:length(entropy)])),
+ FALSE)
+ #cat("peakScale:", peakScale, "\n")
+            # calculate the inter-scale saliency for each entropy peak
+ for (s in (scale_min:scale_max)[peakScale]) {
+ scaleNorm <- s*s / (2*s - 1)
+ scaleEntropy <- entropy[s-scale_min+1]
+ disimilarity <- scaleNorm * distance[s-scale_min+1]
+ saliency <- scaleEntropy * disimilarity
+ scaleSaliency[nrow(scaleSaliency)+1, ] <- list(ri, ci, s,
+ scaleEntropy,
+ disimilarity,
+ saliency)
+ }
+ }
+ }
+ if (progressbar) {
+ # close progress bar
+ close(pb)
+ }
+ return(scaleSaliency)
+}
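+
+# Example (a minimal usage sketch; the image is pure noise, so only
+# spurious entropy peaks are expected):
+#   img <- matrix(rnorm(64*64), nrow=64)
+#   ss <- calcScaleSaliency1D(img, scale_min=3, scale_max=6, nbins=16)
+#   head(ss[order(-ss$saliency), ])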
+
+
+# Simple greedy clustering algorithm to filter out salient regions.
+# Arguments:
+# ssaliency - saliency results from 'calcScaleSaliency*'
+# ssaliency_th - inter-scale saliency threshold
+# disimilarity_th - disimilarity threshold
+# Return:
+# clustered & filtered saliency regions
+greedyCluster <- function(ssaliency, ssaliency_th, disimilarity_th) {
+ # filter by global saliency & inter-scale saliency threshold
+ ssaliency <- ssaliency[((ssaliency$saliency > ssaliency_th) &
+ (ssaliency$disimilarity > disimilarity_th)), ]
+ # sort in descending inter-scale saliency
+ ssaliency <- ssaliency[order(-ssaliency$saliency), ]
+    # cluster the salient points
+ clusteredSaliency <- ssaliency[NULL, ]
+ while (nrow(ssaliency) > 0) {
+ ss <- ssaliency[1, ]
+ clusteredSaliency[nrow(clusteredSaliency)+1, ] <- ss
+ distance2 <- (ssaliency$row - ss$row)^2 + (ssaliency$col - ss$col)^2
+ # filter out the points inside the current salient circle
+ ssaliency <- ssaliency[(distance2 > ss$scale^2), ]
+ }
+ return(clusteredSaliency)
+}
+
+
+# Plot the image and salient regions with ggplot2
+# Arguments:
+# img - input image
+#   saliency - clustered saliency results from greedyCluster()
+plotSalientReg <- function(img, saliency) {
+ require(reshape2)
+ require(ggplot2)
+ plotCircle <- function(xc, yc, radius) {
+ theta <- seq(0, 2*pi, length.out=100)
+ gcircle <- annotate("path",
+ x=xc+radius*cos(theta),
+ y=yc+radius*sin(theta),
+ colour="green")
+ return(gcircle)
+ }
+ # plot the image
+ gp <- ggplot(melt(img), aes(Var2, -Var1, fill=value)) + geom_raster()
+ # add circles
+ for (i in 1:nrow(saliency)) {
+ ss <- saliency[i, ]
+ gcircle <- plotCircle(ss$col, -ss$row, ss$scale)
+ gp <- gp + gcircle
+ }
+ return(gp)
+}
+
+
+# Convert the scale saliency information to DS9 regions.
+#
+# NOTE:
+# The rows and columns of the FITS matrix in R correspond to the X and
+# Y axes in DS9, respectively, i.e., the two axes are *swapped*.
+# Thus the region width and height correspond to the row range and
+# column range, respectively.
+#
+# Arguments:
+#   saliency - clustered saliency results from greedyCluster()
+# Return:
+# vector of DS9 region strings
+saliency2region <- function(saliency) {
+ regions <- with(saliency,
+ paste("circle(", row, ",", col, ",", scale, ")",
+ sep=""))
+ return(regions)
+}
+
+
+# Write DS9 region to file with appropriate header information.
+#
+# Arguments:
+# filename - output region file
+# region - vector/list of region strings
+save.region <- function(filename, region) {
+ rf <- file(filename, "w")
+ region.hdr <- c("# Region file format: DS9 version 4.1", "image")
+ writeLines(region.hdr, rf)
+ writeLines(region, rf)
+ close(rf)
+}
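+
+# Example (a minimal usage sketch; 'ss' is from the sketch after
+# calcScaleSaliency1D() above, and the thresholds are illustrative):
+#   clustered <- greedyCluster(ss, ssaliency_th=2.0, disimilarity_th=0.5)
+#   save.region("salient.reg", saliency2region(clustered))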
+
+
+# vim: set ts=8 sw=4 tw=0 fenc=utf-8 ft=r: #
diff --git a/rand/luminosity_func.py b/rand/luminosity_func.py
new file mode 100644
index 0000000..8cc46ee
--- /dev/null
+++ b/rand/luminosity_func.py
@@ -0,0 +1,96 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/07/01
+#
+
+"""
+Generate random numbers (i.e., fluxes) with respect to the
+provided luminosity function.
+"""
+
+import numpy as np
+import random
+
+def luminosity_func(Lx, N0=1.0):
+ """
+ The *cumulative* luminosity function: N(>=L)
+    The number of objects with luminosity >= Lx.
+ """
+ # broken power-law model (Xu et al. 2005)
+ # Nx = (1) N0 * (Lx/L_b)^(-alpha_l); for Lx <= L_b
+ # (2) N0 * (Lx/L_b)^(-alpha_h); for Lx > L_b
+ L_b = 4.4e38 # break point (erg/s) (+2.0/-1.4)
+ alpha_h = 2.28 # (+1.72/-0.53)
+ alpha_l = 1.08 # (+0.15/-0.33)
+ if isinstance(Lx, np.ndarray):
+        Nx = np.zeros(Lx.shape)
+        # NOTE: restrict to Lx > 0, so that non-positive luminosities
+        # keep Nx = 0 instead of being overwritten with NaN
+        mask = (Lx > 0) & (Lx <= L_b)
+        Nx[mask] = N0 * (Lx[mask] / L_b)**(-alpha_l)
+        Nx[Lx > L_b] = N0 * (Lx[Lx > L_b] / L_b)**(-alpha_h)
+ else:
+ # Lx is a single number
+ if Lx <= 0.0:
+ Nx = 0.0
+ elif Lx <= L_b:
+ Nx = N0 * (Lx/L_b)**(-alpha_l)
+ else:
+ Nx = N0 * (Lx/L_b)**(-alpha_h)
+ return Nx
+
+
+def luminosity_density(Lx, N0=1.0):
+ """
+    Number density of objects at luminosity Lx, i.e., the (unnormalized) PDF:
+
+    PDF(Lx) = - d(luminosity_func(Lx)) / d(Lx)
+ """
+ L_b = 4.4e38 # break point (erg/s) (+2.0/-1.4)
+ alpha_h = 2.28 # (+1.72/-0.53)
+ alpha_l = 1.08 # (+0.15/-0.33)
+ if isinstance(Lx, np.ndarray):
+        Px = np.zeros(Lx.shape)
+        # NOTE: restrict to Lx > 0 (see luminosity_func() above)
+        mask = (Lx > 0) & (Lx <= L_b)
+        Px[mask] = N0 * (alpha_l/L_b) * (Lx[mask] / L_b)**(-alpha_l-1)
+        Px[Lx > L_b] = N0 * (alpha_h/L_b) * (Lx[Lx > L_b] / L_b)**(-alpha_h-1)
+ else:
+ # Lx is a single number
+ if Lx <= 0.0:
+ Px = 0.0
+ elif Lx <= L_b:
+ Px = N0 * (alpha_l/L_b) * (Lx/L_b)**(-alpha_l-1)
+ else:
+ Px = N0 * (alpha_h/L_b) * (Lx/L_b)**(-alpha_h-1)
+ return Px
+
+
+def luminosity_pdf(Lx):
+ """
+    Probability density function, obtained by numerically differentiating
+    the cumulative luminosity function.
+    """
+    h = 1e-5 * Lx  # step size for the numerical differentiation
+ p = - (luminosity_func(Lx+0.5*h) - luminosity_func(Lx-0.5*h)) / h
+ return p
+
+
+def sampler(min, max, number=1):
+ """
+    Generate a sample of luminosity values within [min, max] from the
+    above luminosity distribution, using simple rejection sampling.
+ """
+    # Get the maximum value of the density function within [min, max];
+    # the density decreases monotonically, so the maximum is at 'min'
+    M = luminosity_density(min)
+ results = []
+ for i in range(number):
+ while True:
+ u = random.random() * M
+ y = random.random() * (max-min) + min
+ if u <= luminosity_density(y):
+ results.append(y)
+ break
+ if len(results) == 1:
+ return results[0]
+ else:
+ return np.array(results)
+
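+
+# Minimal usage sketch (run as a script; the flux range, in erg/s, is
+# illustrative):
+if __name__ == "__main__":
+    fluxes = sampler(1e37, 1e40, number=5)
+    print(fluxes)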
diff --git a/rand/pointsrc_coord.py b/rand/pointsrc_coord.py
new file mode 100644
index 0000000..1da9be2
--- /dev/null
+++ b/rand/pointsrc_coord.py
@@ -0,0 +1,98 @@
+# -*- coding: utf-8 -*-
+#
+# Aaron LI
+# 2015/07/01
+#
+
+"""
+Generate random coordinates for point sources with respect to the r^{1/4}
+distribution.
+"""
+
+import numpy as np
+import random
+
+
+def cdf(r, N0=1.0):
+ """
+ Cumulative distribution function of the number of point sources.
+
+ r^{1/4} distribution law: de Vaucouleurs 1948
+ """
+ return N0 * r**(1.0/4.0)
+
+
+def pdf(r, N0=1.0):
+ """
+ Density function of the number of point sources.
+
+    pdf(r) = d(cdf(r)) / d(r)
+ """
+ if isinstance(r, np.ndarray):
+ p = np.zeros(r.shape)
+ p[r<=0.0] = 0.0
+ p[r>0.0] = 0.25 * N0 * r[r>0.0]**(-3.0/4.0)
+ else:
+ if r <= 0.0:
+ p = 0.0
+ else:
+ p = 0.25 * N0 * r**(-3.0/4.0)
+ return p
+
+
+def sampler(min, max, number=1):
+ """
+    Generate a sample of radial coordinates (r only) within [min, max]
+    from the above density distribution, using simple rejection sampling.
+
+ min, max: the minimum and maximum r values (in degree)
+ """
+    # Get the maximum value of the density function within [min, max];
+    # the density decreases monotonically, so the maximum is at 'min'
+    M = pdf(min)
+ results = []
+ for i in range(number):
+ while True:
+ u = random.random() * M
+ y = random.random() * (max-min) + min
+ if u <= pdf(y):
+ results.append(y)
+ break
+ if len(results) == 1:
+ return results[0]
+ else:
+ return np.array(results)
+
+
+def add_angle(r):
+ """
+    Attach a random position angle (in degrees) to each r value to form
+    a polar coordinate (r, theta).
+ """
+ coords = []
+ for ri in r:
+ theta = random.random() * 360
+ coords.append((ri, theta))
+ if len(coords) == 1:
+ return coords[0]
+ else:
+ return coords
+
+
+def to_radec(coords, xc=0, yc=0):
+ """
+ Convert the generated coordinates to (ra, dec) (unit: degree).
+
+ xc, yc: the center coordinate (ra, dec)
+ """
+ results = []
+ for r, theta in coords:
+ # FIXME: spherical algebra should be used!!!
+ dx = r * np.cos(theta*np.pi/180)
+ dy = r * np.sin(theta*np.pi/180)
+ x = xc + dx
+ y = yc + dy
+ results.append((x, y))
+ if len(results) == 1:
+ return results[0]
+ else:
+ return results
+
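+
+# Minimal usage sketch (run as a script; the radial range (in degrees)
+# and the field center are illustrative):
+if __name__ == "__main__":
+    r = sampler(0.01, 0.5, number=10)
+    coords = add_angle(r)
+    print(to_radec(coords, xc=120.0, yc=30.0))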
diff --git a/rand/sphere.py b/rand/sphere.py
new file mode 100644
index 0000000..4220766
--- /dev/null
+++ b/rand/sphere.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# Randomly pick point on the sphere surface.
+#
+# References:
+#   [1] Sphere Point Picking -- from Wolfram MathWorld
+# http://mathworld.wolfram.com/SpherePointPicking.html
+# [2] Random Points on a Sphere
+# https://www.jasondavies.com/maps/random-points/
+#
+# Aaron LI
+# 2015/06/18
+
+__version__ = "0.1.0"
+__date__ = "2015/06/16"
+
+import math
+import random
+
+def sphere_point(n=1, unit="rad"):
+ """
+ Randomly uniformly pick a point on the sphere surface.
+ Using the method "Sphere Point Picking" from Wolfram MathWorld.
+
+ Arguments:
+ n: number of points to be generated
+ unit: unit of output values: rad/deg
+
+ Return:
+        (theta, phi): spherical coordinates (unit: rad by default).
+        theta: [0, 2\pi); phi: [0, \pi]
+ If n > 1, then return a list of (theta, phi)
+ """
+ points = []
+ for i in range(n):
+ u = random.random()
+ v = random.random()
+ theta = 2.0 * math.pi * u
+ phi = math.acos(2.0*v - 1.0)
+ if unit == "deg":
+ theta = rad2deg(theta)
+ phi = rad2deg(phi)
+ points.append((theta, phi))
+ if n == 1:
+ return points[0]
+ else:
+ return points
+
+
+def rad2deg(x):
+ return x * 180.0 / math.pi
+
+def deg2rad(x):
+ return x * math.pi / 180.0
+
+
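+# Minimal usage sketch (run as a script):
+if __name__ == "__main__":
+    print(sphere_point())                 # one (theta, phi) pair in radians
+    print(sphere_point(n=3, unit="deg"))  # three points, in degrees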
diff --git a/region/region.py b/region/region.py
new file mode 100644
index 0000000..47a1636
--- /dev/null
+++ b/region/region.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+#
+# Aaron Li
+# 2015/06/19
+
+"""
+Class Region for regions on the spherical surface.
+Used in astronomy to select/define a certian region, e.g, DS9.
+"""
+
+import sys
+
+
+class Region(object):
+ """
+ Basic region class for regions on the spherical surface,
+ similar definition as to DS9 regions.
+
+ Coordinate style: (ra, dec)
+ Unit: degree
+    ra: [0, 360)
+    dec: [-90, 90]
+ """
+
+ # currently supported region types (similar to DS9)
+ REGION_TYPES = ["circle", "ellipse", "box", "annulus", "pie", "panda"]
+
+ def __init__(self, regtype, xc, yc,
+ radius=None, radius2=None,
+ width=None, height=None, rotation=None,
+ start=None, end=None):
+ if regtype.lower() not in self.REGION_TYPES:
+            raise ValueError("only the following region types are supported: %s"
+                             % " ".join(self.REGION_TYPES))
+ self.regtype = regtype.lower()
+ self.xc = xc
+ self.yc = yc
+ self.radius = radius
+ self.radius2 = radius2
+ self.width = width
+ self.height = height
+        self.rotation = rotation
+        # angular range, e.g., for pie/panda regions
+        self.start = start
+        self.end = end
+
+ def __repr__(self):
+ return "Region: %s" % self.regtype
+
+ def dump(self):
+ return {"regtype": self.regtype,
+ "xc": self.xc,
+ "yc": self.yc,
+ "radius": self.radius,
+ "radius2": self.radius2,
+ "width": self.width,
+ "height": self.height,
+                "rotation": self.rotation,
+                "start": self.start,
+                "end": self.end
+                }
+
+ def is_inside(self, point):
+ """
+ Determine whether the given point is inside the region.
+ """
+ x = point[0]
+ y = point[1]
+ if self.regtype == "box":
+ #print("WARNING: rotation box currently not supported!",
+ # file=sys.stderr)
+ xmin = self.xc - self.width/2.0
+ xmax = self.xc + self.width/2.0
+ ymin = self.yc - self.height/2.0
+ ymax = self.yc + self.height/2.0
+ if all([x >= xmin, x <= xmax, y >= ymin, y <= ymax]):
+ return True
+ else:
+ return False
+ else:
+ raise ValueError("region type '%s' currently not implemented" %\
+ self.regtype)
+
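+
+# Minimal usage sketch (run as a script; the coordinates are illustrative):
+if __name__ == "__main__":
+    box = Region("box", xc=120.0, yc=30.0, width=2.0, height=1.0)
+    print(box)                           # Region: box
+    print(box.is_inside((120.5, 30.2)))  # True
+    print(box.is_inside((118.0, 30.0)))  # False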