about · summary · refs · log · tree · commit · diff · stats
path: root/fg21sim/extragalactic/clusters/main.py
diff options
context:
space:
mode:
author: Aaron LI <aly@aaronly.me>  2018-01-02 15:46:39 +0800
committer: Aaron LI <aly@aaronly.me>  2018-01-02 15:58:02 +0800
commit: f61a99b11ce344ac463202bad8a171f9d0287fa3 (patch)
tree: bf2ecb013668d6e753f3a62f6e9a47a958d002cd /fg21sim/extragalactic/clusters/main.py
parent: 89e00a8252df9c00c4421cc145f8b548b876b74c (diff)
download: fg21sim-f61a99b11ce344ac463202bad8a171f9d0287fa3.tar.bz2
clusters: pad the catalog merger events to allow CSV save
Diffstat (limited to 'fg21sim/extragalactic/clusters/main.py')
-rw-r--r--  fg21sim/extragalactic/clusters/main.py  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/fg21sim/extragalactic/clusters/main.py b/fg21sim/extragalactic/clusters/main.py
index cf5a3dc..bc5326c 100644
--- a/fg21sim/extragalactic/clusters/main.py
+++ b/fg21sim/extragalactic/clusters/main.py
@@ -224,8 +224,10 @@ class GalaxyClusters:
"merger_z - redshift of each merger",
"merger_age - [Gyr] cosmic age at each merger",
]
- logger.info("%d (%.1f%%) clusters experience recent mergers." %
+ logger.info("%d (%.1f%%) clusters experienced recent mergers." %
(num_hasmerger, 100*num_hasmerger/num))
+ nmax = max([cdict["merger_num"] for cdict in self.catalog])
+ logger.info("Maximum number of merger events: %d" % nmax)
def _simulate_halos(self):
"""
@@ -394,6 +396,17 @@ class GalaxyClusters:
os.rename(outfile, outfile+".old")
logger.info("Converting cluster catalog into a Pandas DataFrame ...")
+ # Pad the merger events to be same length
+ nmax = max([cdict["merger_num"] for cdict in self.catalog])
+ for cdict in self.catalog:
+ num = cdict["merger_num"]
+ cdict.update([
+ ("merger_mass1", cdict["merger_mass1"] + [None]*(nmax-num)),
+ ("merger_mass2", cdict["merger_mass2"] + [None]*(nmax-num)),
+ ("merger_z", cdict["merger_z"] + [None]*(nmax-num)),
+ ("merger_age", cdict["merger_age"] + [None]*(nmax-num)),
+ ])
+
keys = list(self.catalog[0].keys())
catalog_df = dictlist_to_dataframe(self.catalog, keys=keys)
dataframe_to_csv(catalog_df, outfile=outfile,