lsst.pipe.tasks  8.5-hsc+2
functors.py
1 import yaml
2 import re
3 
4 import pandas as pd
5 import numpy as np
6 import astropy.units as u
7 
8 from lsst.daf.persistence import doImport
9 from .parquetTable import MultilevelParquetTable
10 
11 
12 def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors',
13  typeKey='functor', name=None):
14  """Initialize an object defined in a dictionary
15 
16  The object needs to be importable as
17  '{0}.{1}'.format(basePath, initDict[typeKey])
18  The positional and keyword arguments (if any) are contained in
19  "args" and "kwargs" entries in the dictionary, respectively.
20  This is used in `functors.CompositeFunctor.from_yaml` to initialize
21  a composite functor from a specification in a YAML file.
22 
23  Parameters
24  ----------
25  initDict : dictionary
26  Dictionary describing object's initialization. Must contain
27  an entry keyed by ``typeKey`` that is the name of the object,
28  relative to ``basePath``.
29  basePath : str
30  Path relative to module in which ``initDict[typeKey]`` is defined.
31  typeKey : str
32  Key of ``initDict`` that is the name of the object
33  (relative to `basePath`).
34  """
35  initDict = initDict.copy()
36  # TO DO: DM-21956 We should be able to define functors outside this module
37  pythonType = doImport('{0}.{1}'.format(basePath, initDict.pop(typeKey)))
38  args = []
39  if 'args' in initDict:
40  args = initDict.pop('args')
41  if isinstance(args, str):
42  args = [args]
43  try:
44  element = pythonType(*args, **initDict)
45  except Exception as e:
46  message = f'Error in constructing functor "{name}" of type {pythonType.__name__} with args: {args}'
47  raise type(e)(message, e.args)
48  return element
49 
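# Example (illustrative sketch, not part of the module): the kind of dictionary
# `init_fromDict` consumes. The 'functor' entry names a class defined in this
# module; 'args' and any remaining keys become positional and keyword arguments
# (column and filter names here are illustrative):
#
#     spec = {'functor': 'Mag', 'args': 'base_PsfFlux', 'filt': 'HSC-G'}
#     psfMag = init_fromDict(spec, name='psfMag')
#     # equivalent to Mag('base_PsfFlux', filt='HSC-G')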
50 
51 class Functor(object):
52  """Define and execute a calculation on a ParquetTable
53 
54  The `__call__` method accepts a `ParquetTable` object, and returns the
55  result of the calculation as a single column. Each functor defines what
56  columns are needed for the calculation, and only these columns are read
57  from the `ParquetTable`.
58 
59  The action of `__call__` consists of two steps: first, loading the
60  necessary columns from disk into memory as a `pandas.DataFrame` object;
61  and second, performing the computation on this dataframe and returning the
62  result.
63 
64 
 65  To define a new `Functor`, a subclass must define a `_func` method
 66  that takes a `pandas.DataFrame` and returns the result as a `pandas.Series`.
 67  In addition, it must define the following attributes:
68 
69  * `_columns`: The columns necessary to perform the calculation
70  * `name`: A name appropriate for a figure axis label
71  * `shortname`: A name appropriate for use as a dictionary key
72 
73  On initialization, a `Functor` should declare what filter (`filt` kwarg)
74  and dataset (e.g. `'ref'`, `'meas'`, `'forced_src'`) it is intended to be
75  applied to. This enables the `_get_cols` method to extract the proper
76  columns from the parquet file. If not specified, the dataset will fall back
 77  on the `_defaultDataset` attribute. If the filter is not specified and `dataset`
78  is anything other than `'ref'`, then an error will be raised when trying to
79  perform the calculation.
80 
81  As currently implemented, `Functor` is only set up to expect a
82  `ParquetTable` of the format of the `deepCoadd_obj` dataset; that is, a
83  `MultilevelParquetTable` with the levels of the column index being `filter`,
84  `dataset`, and `column`. This is defined in the `_columnLevels` attribute,
85  as well as being implicit in the role of the `filt` and `dataset` attributes
86  defined at initialization. In addition, the `_get_cols` method that reads
87  the dataframe from the `ParquetTable` will return a dataframe with column
88  index levels defined by the `_dfLevels` attribute; by default, this is
89  `column`.
90 
91  The `_columnLevels` and `_dfLevels` attributes should generally not need to
92  be changed, unless `_func` needs columns from multiple filters or datasets
93  to do the calculation.
94  An example of this is the `lsst.pipe.tasks.functors.Color` functor, for
95  which `_dfLevels = ('filter', 'column')`, and `_func` expects the dataframe
96  it gets to have those levels in the column index.
97 
98  Parameters
99  ----------
100  filt : str
101  Filter upon which to do the calculation
102 
103  dataset : str
104  Dataset upon which to do the calculation
105  (e.g., 'ref', 'meas', 'forced_src').
106 
107  """
108 
109  _defaultDataset = 'ref'
110  _columnLevels = ('filter', 'dataset', 'column')
111  _dfLevels = ('column',)
112  _defaultNoDup = False
113 
114  def __init__(self, filt=None, dataset=None, noDup=None):
115  self.filt = filt
116  self.dataset = dataset if dataset is not None else self._defaultDataset
117  self._noDup = noDup
118 
119  @property
120  def noDup(self):
121  if self._noDup is not None:
122  return self._noDup
123  else:
124  return self._defaultNoDup
125 
126  @property
127  def columns(self):
128  """Columns required to perform calculation
129  """
130  if not hasattr(self, '_columns'):
131  raise NotImplementedError('Must define columns property or _columns attribute')
132  return self._columns
133 
134  def multilevelColumns(self, parq):
135  if not set(parq.columnLevels) == set(self._columnLevels):
136  raise ValueError('ParquetTable does not have the expected column levels. ' +
137  'Got {0}; expected {1}.'.format(parq.columnLevels, self._columnLevels))
138 
139  columnDict = {'column': self.columns,
140  'dataset': self.dataset}
141  if self.filt is None:
142  if 'filter' in parq.columnLevels:
143  if self.dataset == 'ref':
144  columnDict['filter'] = parq.columnLevelNames['filter'][0]
145  else:
 146  raise ValueError("'filt' not set for functor {} ".format(self.name) +
147  "(dataset {}) ".format(self.dataset) +
148  "and ParquetTable " +
149  "contains multiple filters in column index. " +
150  "Set 'filt' or set 'dataset' to 'ref'.")
151  else:
152  columnDict['filter'] = self.filt
153 
154  return parq._colsFromDict(columnDict)
155 
156  def _func(self, df, dropna=True):
157  raise NotImplementedError('Must define calculation on dataframe')
158 
159  def _get_cols(self, parq):
160  """Retrieve dataframe necessary for calculation.
161 
162  Returns dataframe upon which `self._func` can act.
163  """
164  if isinstance(parq, MultilevelParquetTable):
165  columns = self.multilevelColumns(parq)
166  df = parq.toDataFrame(columns=columns, droplevels=False)
167  df = self._setLevels(df)
168  else:
169  columns = self.columns
170  df = parq.toDataFrame(columns=columns)
171 
172  return df
173 
174  def _setLevels(self, df):
175  levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
176  df.columns = df.columns.droplevel(levelsToDrop)
177  return df
178 
179  def _dropna(self, vals):
180  return vals.dropna()
181 
182  def __call__(self, parq, dropna=False):
 183  df = self._get_cols(parq)
 184  try:
 185  vals = self._func(df)
186  except Exception:
187  vals = self.fail(df)
188  if dropna:
189  vals = self._dropna(vals)
190 
191  return vals
192 
193  def fail(self, df):
194  return pd.Series(np.full(len(df), np.nan), index=df.index)
195 
196  @property
197  def name(self):
198  """Full name of functor (suitable for figure labels)
199  """
200  return NotImplementedError
201 
202  @property
203  def shortname(self):
204  """Short name of functor (suitable for column name/dict key)
205  """
206  return self.name
207 
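# Example (illustrative sketch): a minimal Functor subclass. It declares the
# columns it needs, a plot-friendly `name`, a `shortname`, and a `_func` that
# maps the loaded dataframe to a Series. The column names are illustrative:
#
#     class PsfSn(Functor):
#         """Signal-to-noise ratio of the PSF flux"""
#         _columns = ['base_PsfFlux_instFlux', 'base_PsfFlux_instFluxErr']
#         name = 'PSF S/N'
#         shortname = 'psfSn'
#
#         def _func(self, df):
#             return df['base_PsfFlux_instFlux'] / df['base_PsfFlux_instFluxErr']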
208 
209 class CompositeFunctor(Functor):
 210  """Perform multiple calculations at once on a catalog
211 
212  The role of a `CompositeFunctor` is to group together computations from
 213  multiple functors. Instead of returning a `pandas.Series`, a
 214  `CompositeFunctor` returns a `pandas.DataFrame`, with the column names
215  being the keys of `funcDict`.
216 
217  The `columns` attribute of a `CompositeFunctor` is the union of all columns
218  in all the component functors.
219 
220  A `CompositeFunctor` does not use a `_func` method itself; rather,
221  when a `CompositeFunctor` is called, all its columns are loaded
222  at once, and the resulting dataframe is passed to the `_func` method of each component
223  functor. This has the advantage of only doing I/O (reading from parquet file) once,
 224  and works because each component functor's `_func` method does not care
 225  whether there are *extra* columns in the dataframe it is passed, only that
 226  it contains *at least* the `columns` it expects.
227 
 228  An important and useful class method is `from_file`, which takes the path to a YAML
 229  file specifying a collection of functors (a parsed dictionary can also be passed directly to `from_yaml`).
230 
231  Parameters
232  ----------
233  funcs : `dict` or `list`
234  Dictionary or list of functors. If a list, then it will be converted
 235  into a dictionary according to the `.shortname` attribute of each functor.
236 
237  """
238  dataset = None
239 
240  def __init__(self, funcs, **kwargs):
241 
242  if type(funcs) == dict:
243  self.funcDict = funcs
244  else:
245  self.funcDict = {f.shortname: f for f in funcs}
246 
247  self._filt = None
248 
249  super().__init__(**kwargs)
250 
251  @property
252  def filt(self):
253  return self._filt
254 
255  @filt.setter
256  def filt(self, filt):
257  if filt is not None:
258  for _, f in self.funcDict.items():
259  f.filt = filt
260  self._filt = filt
261 
262  def update(self, new):
263  if isinstance(new, dict):
264  self.funcDict.update(new)
265  elif isinstance(new, CompositeFunctor):
266  self.funcDict.update(new.funcDict)
267  else:
268  raise TypeError('Can only update with dictionary or CompositeFunctor.')
269 
270  # Make sure new functors have the same 'filt' set
271  if self.filt is not None:
272  self.filt = self.filt
273 
274  @property
275  def columns(self):
276  return list(set([x for y in [f.columns for f in self.funcDict.values()] for x in y]))
277 
278  def multilevelColumns(self, parq):
279  return list(set([x for y in [f.multilevelColumns(parq)
280  for f in self.funcDict.values()] for x in y]))
281 
282  def __call__(self, parq, **kwargs):
283  if isinstance(parq, MultilevelParquetTable):
284  columns = self.multilevelColumns(parq)
285  df = parq.toDataFrame(columns=columns, droplevels=False)
286  valDict = {}
287  for k, f in self.funcDict.items():
288  try:
289  subdf = f._setLevels(df[f.multilevelColumns(parq)])
290  valDict[k] = f._func(subdf)
291  except Exception:
292  valDict[k] = f.fail(subdf)
293  else:
294  columns = self.columns
295  df = parq.toDataFrame(columns=columns)
296  valDict = {k: f._func(df) for k, f in self.funcDict.items()}
297 
298  try:
299  valDf = pd.concat(valDict, axis=1)
300  except TypeError:
301  print([(k, type(v)) for k, v in valDict.items()])
302  raise
303 
304  if kwargs.get('dropna', False):
305  valDf = valDf.dropna(how='any')
306 
307  return valDf
308 
309  @classmethod
310  def renameCol(cls, col, renameRules):
311  if renameRules is None:
312  return col
313  for old, new in renameRules:
314  if col.startswith(old):
315  col = col.replace(old, new)
316  return col
317 
318  @classmethod
319  def from_file(cls, filename, **kwargs):
320  with open(filename) as f:
321  translationDefinition = yaml.safe_load(f)
322 
323  return cls.from_yaml(translationDefinition, **kwargs)
324 
325  @classmethod
326  def from_yaml(cls, translationDefinition, **kwargs):
327  funcs = {}
328  for func, val in translationDefinition['funcs'].items():
329  funcs[func] = init_fromDict(val, name=func)
330 
331  if 'flag_rename_rules' in translationDefinition:
332  renameRules = translationDefinition['flag_rename_rules']
333  else:
334  renameRules = None
335 
336  if 'refFlags' in translationDefinition:
337  for flag in translationDefinition['refFlags']:
338  funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')
339 
340  if 'flags' in translationDefinition:
341  for flag in translationDefinition['flags']:
342  funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')
343 
344  return cls(funcs, **kwargs)
345 
346 
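# Example (illustrative sketch): a YAML specification of the form consumed by
# `CompositeFunctor.from_file` (file name, column names, and filters are illustrative):
#
#     funcs:
#         psfMag:
#             functor: Mag
#             args: base_PsfFlux
#         gr:
#             functor: Color
#             args: [modelfit_CModel, HSC-G, HSC-R]
#     flags:
#         - base_PixelFlags_flag_interpolated
#     refFlags:
#         - detect_isPrimary
#
# which could then be used as, e.g.,
#
#     func = CompositeFunctor.from_file('functors.yaml', filt='HSC-I')
#     df = func(parq)   # parq: a MultilevelParquetTable such as deepCoadd_obj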
347 def mag_aware_eval(df, expr):
348  """Evaluate an expression on a DataFrame, knowing what the 'mag' function means
349 
350  Builds on `pandas.DataFrame.eval`, which parses and executes math on dataframes.
351 
352  Parameters
353  ----------
354  df : pandas.DataFrame
355  Dataframe on which to evaluate expression.
356 
357  expr : str
358  Expression.
359  """
360  try:
361  expr_new = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>)/log(10)', expr)
362  val = df.eval(expr_new, truediv=True)
363  except Exception: # Should check what actually gets raised
364  expr_new = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
365  val = df.eval(expr_new, truediv=True)
366  return val
367 
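# Example (illustrative sketch): 'mag(x)' is rewritten to -2.5*log10(x) before the
# expression is handed to `pandas.DataFrame.eval`; if the bare name is not a column,
# the '<x>_instFlux' column is tried instead:
#
#     df = pd.DataFrame({'g_instFlux': [100.0], 'r_instFlux': [50.0]})
#     mag_aware_eval(df, 'mag(g) - mag(r)')   # -2.5*log10(100/50) ~ -0.753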
368 
370  """Arbitrary computation on a catalog
371 
 372  Column names (and thus the columns to be loaded from the catalog) are found
 373  by extracting all words from the expression and ignoring known "math" words.
374 
375  Parameters
376  ----------
377  expr : str
378  Expression to evaluate, to be parsed and executed by `mag_aware_eval`.
379  """
380  _ignore_words = ('mag', 'sin', 'cos', 'exp', 'log', 'sqrt')
381 
382  def __init__(self, expr, **kwargs):
383  self.expr = expr
384  super().__init__(**kwargs)
385 
386  @property
387  def name(self):
388  return self.expr
389 
390  @property
391  def columns(self):
392  flux_cols = re.findall(r'mag\(\s*(\w+)\s*\)', self.expr)
393 
394  cols = [c for c in re.findall(r'[a-zA-Z_]+', self.expr) if c not in self._ignore_words]
395  not_a_col = []
396  for c in flux_cols:
397  if not re.search('_instFlux$', c):
398  cols.append('{}_instFlux'.format(c))
399  not_a_col.append(c)
400  else:
401  cols.append(c)
402 
403  return list(set([c for c in cols if c not in not_a_col]))
404 
405  def _func(self, df):
406  return mag_aware_eval(df, self.expr)
407 
408 
409 class Column(Functor):
 410  """Get column with specified name
411  """
412 
413  def __init__(self, col, **kwargs):
414  self.col = col
415  super().__init__(**kwargs)
416 
417  @property
418  def name(self):
419  return self.col
420 
421  @property
422  def columns(self):
423  return [self.col]
424 
425  def _func(self, df):
426  return df[self.col]
427 
428 
429 class Index(Functor):
430  """Return the value of the index for each object
431  """
432 
433  columns = ['coord_ra'] # just a dummy; something has to be here
434  _defaultDataset = 'ref'
435  _defaultNoDup = True
436 
437  def _func(self, df):
438  return pd.Series(df.index, index=df.index)
439 
440 
441 class IDColumn(Column):
 442  col = 'id'
443  _allow_difference = False
444  _defaultNoDup = True
445 
446  def _func(self, df):
447  return pd.Series(df.index, index=df.index)
448 
449 
450 class FootprintNPix(Column):
 451  col = 'base_Footprint_nPix'
452 
453 
454 class CoordColumn(Column):
 455  """Base class for coordinate column, in degrees
456  """
457  _radians = True
458 
459  def __init__(self, col, **kwargs):
460  super().__init__(col, **kwargs)
461 
462  def _func(self, df):
463  # Must not modify original column in case that column is used by another functor
464  output = df[self.col] * 180 / np.pi if self._radians else df[self.col]
465  return output
466 
467 
468 class RAColumn(CoordColumn):
 469  """Right Ascension, in degrees
470  """
471  name = 'RA'
472  _defaultNoDup = True
473 
474  def __init__(self, **kwargs):
475  super().__init__('coord_ra', **kwargs)
476 
477  def __call__(self, catalog, **kwargs):
478  return super().__call__(catalog, **kwargs)
479 
480 
481 class DecColumn(CoordColumn):
 482  """Declination, in degrees
483  """
484  name = 'Dec'
485  _defaultNoDup = True
486 
487  def __init__(self, **kwargs):
488  super().__init__('coord_dec', **kwargs)
489 
490  def __call__(self, catalog, **kwargs):
491  return super().__call__(catalog, **kwargs)
492 
493 
494 def fluxName(col):
495  if not col.endswith('_instFlux'):
496  col += '_instFlux'
497  return col
498 
499 
500 def fluxErrName(col):
501  if not col.endswith('_instFluxErr'):
502  col += '_instFluxErr'
503  return col
504 
505 
506 class Mag(Functor):
507  """Compute calibrated magnitude
508 
 509  Takes a `calib` argument, from which the flux at mag=0 is obtained
 510  as `calib.getFluxMag0()`. If not provided, then the default
 511  `fluxMag0` is 63095734448.0194, which is the default for HSC.
 512  This default should be removed in DM-21955.
513 
514  This calculation hides warnings about invalid values and dividing by zero.
515 
516  As for all functors, a `dataset` and `filt` kwarg should be provided upon
517  initialization. Unlike the default `Functor`, however, the default dataset
518  for a `Mag` is `'meas'`, rather than `'ref'`.
519 
520  Parameters
521  ----------
522  col : `str`
 523  Name of flux column from which to compute magnitude. It is passed through
 524  the `lsst.pipe.tasks.functors.fluxName` function---that is, you can pass
 525  `'modelfit_CModel'` instead of `'modelfit_CModel_instFlux'` and it will
 526  understand.
527  calib : `lsst.afw.image.calib.Calib` (optional)
528  Object that knows zero point.
529  """
530  _defaultDataset = 'meas'
531 
532  def __init__(self, col, calib=None, **kwargs):
533  self.col = fluxName(col)
534  self.calib = calib
535  if calib is not None:
536  self.fluxMag0 = calib.getFluxMag0()[0]
537  else:
 538  # TO DO: DM-21955 Replace hard coded photometric calibration values
539  self.fluxMag0 = 63095734448.0194
540 
541  super().__init__(**kwargs)
542 
543  @property
544  def columns(self):
545  return [self.col]
546 
547  def _func(self, df):
548  with np.warnings.catch_warnings():
549  np.warnings.filterwarnings('ignore', r'invalid value encountered')
550  np.warnings.filterwarnings('ignore', r'divide by zero')
551  return -2.5*np.log10(df[self.col] / self.fluxMag0)
552 
553  @property
554  def name(self):
555  return 'mag_{0}'.format(self.col)
556 
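# Example (illustrative sketch): with the default HSC zero point, the calculation is
# mag = -2.5*log10(instFlux / 63095734448.0194), so an object with an instFlux of
# 6309.57 counts would come out at mag 17.5 (column and filter names illustrative):
#
#     psfMag = Mag('base_PsfFlux', filt='HSC-I', dataset='meas')
#     # psfMag.columns == ['base_PsfFlux_instFlux']
#     # psfMag(parq) -> pandas.Series of magnitudes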
557 
558 class MagErr(Mag):
559  """Compute calibrated magnitude uncertainty
560 
561  Takes the same `calib` object as `lsst.pipe.tasks.functors.Mag`.
562 
 563  Parameters
 ----------
564  col : `str`
565  Name of flux column
566  calib : `lsst.afw.image.calib.Calib` (optional)
567  Object that knows zero point.
568  """
569 
570  def __init__(self, *args, **kwargs):
571  super().__init__(*args, **kwargs)
572  if self.calib is not None:
573  self.fluxMag0Err = self.calib.getFluxMag0()[1]
574  else:
575  self.fluxMag0Err = 0.
576 
577  @property
578  def columns(self):
579  return [self.col, self.col + 'Err']
580 
581  def _func(self, df):
582  with np.warnings.catch_warnings():
583  np.warnings.filterwarnings('ignore', r'invalid value encountered')
584  np.warnings.filterwarnings('ignore', r'divide by zero')
585  fluxCol, fluxErrCol = self.columns
586  x = df[fluxErrCol] / df[fluxCol]
587  y = self.fluxMag0Err / self.fluxMag0
588  magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
589  return magErr
590 
591  @property
592  def name(self):
593  return super().name + '_err'
594 
595 
596 class NanoMaggie(Mag):
 597  """Convert an instrument flux to nanomaggies (1e-9 of the flux of a zeroth-magnitude source).
 598  """
599 
600  def _func(self, df):
601  return (df[self.col] / self.fluxMag0) * 1e9
602 
603 
604 class MagDiff(Functor):
 605  _defaultDataset = 'meas'
606 
607  """Functor to calculate magnitude difference"""
608 
609  def __init__(self, col1, col2, **kwargs):
610  self.col1 = fluxName(col1)
611  self.col2 = fluxName(col2)
612  super().__init__(**kwargs)
613 
614  @property
615  def columns(self):
616  return [self.col1, self.col2]
617 
618  def _func(self, df):
619  with np.warnings.catch_warnings():
620  np.warnings.filterwarnings('ignore', r'invalid value encountered')
621  np.warnings.filterwarnings('ignore', r'divide by zero')
622  return -2.5*np.log10(df[self.col1]/df[self.col2])
623 
624  @property
625  def name(self):
626  return '(mag_{0} - mag_{1})'.format(self.col1, self.col2)
627 
628  @property
629  def shortname(self):
630  return 'magDiff_{0}_{1}'.format(self.col1, self.col2)
631 
632 
633 class Color(Functor):
634  """Compute the color between two filters
635 
636  Computes color by initializing two different `Mag`
637  functors based on the `col` and filters provided, and
638  then returning the difference.
639 
640  This is enabled by the `_func` expecting a dataframe with a
641  multilevel column index, with both `'filter'` and `'column'`,
642  instead of just `'column'`, which is the `Functor` default.
643  This is controlled by the `_dfLevels` attribute.
644 
 645  Also of note, the default dataset for `Color` is `'forced_src'`,
646  whereas for `Mag` it is `'meas'`.
647 
648  Parameters
649  ----------
650  col : str
651  Name of flux column from which to compute; same as would be passed to
652  `lsst.pipe.tasks.functors.Mag`.
653 
654  filt2, filt1 : str
655  Filters from which to compute magnitude difference.
656  Color computed is `Mag(filt2) - Mag(filt1)`.
657  """
658  _defaultDataset = 'forced_src'
659  _dfLevels = ('filter', 'column')
660  _defaultNoDup = True
661 
662  def __init__(self, col, filt2, filt1, **kwargs):
663  self.col = fluxName(col)
664  if filt2 == filt1:
665  raise RuntimeError("Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
666  self.filt2 = filt2
667  self.filt1 = filt1
668 
669  self.mag2 = Mag(col, filt=filt2, **kwargs)
670  self.mag1 = Mag(col, filt=filt1, **kwargs)
671 
672  super().__init__(**kwargs)
673 
674  @property
675  def filt(self):
676  return None
677 
678  @filt.setter
679  def filt(self, filt):
680  pass
681 
682  def _func(self, df):
683  mag2 = self.mag2._func(df[self.filt2])
684  mag1 = self.mag1._func(df[self.filt1])
685  return mag2 - mag1
686 
687  @property
688  def columns(self):
689  return [self.mag1.col, self.mag2.col]
690 
691  def multilevelColumns(self, parq):
692  return [(self.dataset, self.filt1, self.col),
693  (self.dataset, self.filt2, self.col)]
694 
695  @property
696  def name(self):
697  return '{0} - {1} ({2})'.format(self.filt2, self.filt1, self.col)
698 
699  @property
700  def shortname(self):
701  return '{0}_{1}m{2}'.format(self.col, self.filt2.replace('-', ''),
702  self.filt1.replace('-', ''))
703 
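# Example (illustrative sketch): a g-r color from CModel fluxes. Because the two
# magnitudes come from different filters, the dataframe handed to `_func` keeps both
# the 'filter' and 'column' levels of the column index (filter names illustrative):
#
#     gr = Color('modelfit_CModel', 'HSC-G', 'HSC-R')
#     # gr.shortname == 'modelfit_CModel_instFlux_HSCGmHSCR'
#     # gr(parq) -> Mag in HSC-G minus Mag in HSC-R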
704 
705 class Labeller(Functor):
 706  """Base class for labelling functors; forces ``dropna=False`` in `__call__`.
 707  """
708  _null_label = 'null'
709  _allow_difference = False
710  name = 'label'
711  _force_str = False
712 
713  def __call__(self, parq, dropna=False, **kwargs):
714  return super().__call__(parq, dropna=False, **kwargs)
715 
716 
717 class StarGalaxyLabeller(Labeller):
 718  _columns = ["base_ClassificationExtendedness_value"]
719  _column = "base_ClassificationExtendedness_value"
720 
721  def _func(self, df):
722  x = df[self._columns][self._column]
723  mask = x.isnull()
724  test = (x < 0.5).astype(int)
725  test = test.mask(mask, 2)
726 
727  # TODO: DM-21954 Look into veracity of inline comment below
728  # are these backwards?
729  categories = ['galaxy', 'star', self._null_label]
730  label = pd.Series(pd.Categorical.from_codes(test, categories=categories),
731  index=x.index, name='label')
732  if self._force_str:
733  label = label.astype(str)
734  return label
735 
736 
737 class NumStarLabeller(Labeller):
 738  _columns = ['numStarFlags']
739  labels = {"star": 0, "maybe": 1, "notStar": 2}
740 
741  def _func(self, df):
742  x = df[self._columns][self._columns[0]]
743 
744  # Number of filters
745  n = len(x.unique()) - 1
746 
747  labels = ['noStar', 'maybe', 'star']
748  label = pd.Series(pd.cut(x, [-1, 0, n-1, n], labels=labels),
749  index=x.index, name='label')
750 
751  if self._force_str:
752  label = label.astype(str)
753 
754  return label
755 
756 
757 class DeconvolvedMoments(Functor):
 758  name = 'Deconvolved Moments'
759  shortname = 'deconvolvedMoments'
760  _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
761  "ext_shapeHSM_HsmSourceMoments_yy",
762  "base_SdssShape_xx", "base_SdssShape_yy",
763  "ext_shapeHSM_HsmPsfMoments_xx",
764  "ext_shapeHSM_HsmPsfMoments_yy")
765 
766  def _func(self, df):
767  """Calculate deconvolved moments"""
768  if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns: # _xx added by tdm
769  hsm = df["ext_shapeHSM_HsmSourceMoments_xx"] + df["ext_shapeHSM_HsmSourceMoments_yy"]
770  else:
771  hsm = np.ones(len(df))*np.nan
772  sdss = df["base_SdssShape_xx"] + df["base_SdssShape_yy"]
773  if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
774  psf = df["ext_shapeHSM_HsmPsfMoments_xx"] + df["ext_shapeHSM_HsmPsfMoments_yy"]
775  else:
776  # LSST does not have shape.sdss.psf. Could instead add base_PsfShape to catalog using
777  # exposure.getPsf().computeShape(s.getCentroid()).getIxx()
778  # raise TaskError("No psf shape parameter found in catalog")
779  raise RuntimeError('No psf shape parameter found in catalog')
780 
781  return hsm.where(np.isfinite(hsm), sdss) - psf
782 
783 
784 class SdssTraceSize(Functor):
 785  """Functor to calculate SDSS trace radius size for sources"""
786  name = "SDSS Trace Size"
787  shortname = 'sdssTrace'
788  _columns = ("base_SdssShape_xx", "base_SdssShape_yy")
789 
790  def _func(self, df):
791  srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
792  return srcSize
793 
794 
795 class PsfSdssTraceSizeDiff(Functor):
 796  """Functor to calculate SDSS trace radius size difference (%) between object and psf model"""
797  name = "PSF - SDSS Trace Size"
798  shortname = 'psf_sdssTrace'
799  _columns = ("base_SdssShape_xx", "base_SdssShape_yy",
800  "base_SdssShape_psf_xx", "base_SdssShape_psf_yy")
801 
802  def _func(self, df):
803  srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
804  psfSize = np.sqrt(0.5*(df["base_SdssShape_psf_xx"] + df["base_SdssShape_psf_yy"]))
805  sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
806  return sizeDiff
807 
808 
809 class HsmTraceSize(Functor):
 810  """Functor to calculate HSM trace radius size for sources"""
811  name = 'HSM Trace Size'
812  shortname = 'hsmTrace'
813  _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
814  "ext_shapeHSM_HsmSourceMoments_yy")
815 
816  def _func(self, df):
817  srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"] +
818  df["ext_shapeHSM_HsmSourceMoments_yy"]))
819  return srcSize
820 
821 
822 class PsfHsmTraceSizeDiff(Functor):
 823  """Functor to calculate HSM trace radius size difference (%) between object and psf model"""
824  name = 'PSF - HSM Trace Size'
825  shortname = 'psf_HsmTrace'
826  _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
827  "ext_shapeHSM_HsmSourceMoments_yy",
828  "ext_shapeHSM_HsmPsfMoments_xx",
829  "ext_shapeHSM_HsmPsfMoments_yy")
830 
831  def _func(self, df):
832  srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"] +
833  df["ext_shapeHSM_HsmSourceMoments_yy"]))
834  psfSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmPsfMoments_xx"] +
835  df["ext_shapeHSM_HsmPsfMoments_yy"]))
836  sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
837  return sizeDiff
838 
839 
840 class HsmFwhm(Functor):
 841  name = 'HSM Psf FWHM'
842  _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
843  # TODO: DM-21403 pixel scale should be computed from the CD matrix or transform matrix
844  pixelScale = 0.168
845  SIGMA2FWHM = 2*np.sqrt(2*np.log(2))
846 
847  def _func(self, df):
848  return self.pixelScale*self.SIGMA2FWHM*np.sqrt(
849  0.5*(df['ext_shapeHSM_HsmPsfMoments_xx'] + df['ext_shapeHSM_HsmPsfMoments_yy']))
850 
851 
852 class E1(Functor):
853  name = "Distortion Ellipticity (e1)"
854  shortname = "Distortion"
855 
856  def __init__(self, colXX, colXY, colYY, **kwargs):
857  self.colXX = colXX
858  self.colXY = colXY
859  self.colYY = colYY
860  self._columns = [self.colXX, self.colXY, self.colYY]
861  super().__init__(**kwargs)
862 
863  @property
864  def columns(self):
865  return [self.colXX, self.colXY, self.colYY]
866 
867  def _func(self, df):
 868  return (df[self.colXX] - df[self.colYY]) / (df[self.colXX] + df[self.colYY])
869 
870 
871 class E2(Functor):
872  name = "Ellipticity e2"
873 
874  def __init__(self, colXX, colXY, colYY, **kwargs):
875  self.colXX = colXX
876  self.colXY = colXY
877  self.colYY = colYY
878  super().__init__(**kwargs)
879 
880  @property
881  def columns(self):
882  return [self.colXX, self.colXY, self.colYY]
883 
884  def _func(self, df):
885  return 2*df[self.colXY] / (df[self.colXX] + df[self.colYY])
886 
887 
888 class RadiusFromQuadrupole(Functor):
 889 
890  def __init__(self, colXX, colXY, colYY, **kwargs):
891  self.colXX = colXX
892  self.colXY = colXY
893  self.colYY = colYY
894  super().__init__(**kwargs)
895 
896  @property
897  def columns(self):
898  return [self.colXX, self.colXY, self.colYY]
899 
900  def _func(self, df):
901  return (df[self.colXX]*df[self.colYY] - df[self.colXY]**2)**0.25
902 
903 
904 class LocalWcs(Functor):
 905  """Computations using the stored localWcs.
906  """
907  name = "LocalWcsOperations"
908 
909  def __init__(self,
910  colCD_1_1,
911  colCD_1_2,
912  colCD_2_1,
913  colCD_2_2,
914  **kwargs):
915  self.colCD_1_1 = colCD_1_1
916  self.colCD_1_2 = colCD_1_2
917  self.colCD_2_1 = colCD_2_1
918  self.colCD_2_2 = colCD_2_2
919  super().__init__(**kwargs)
920 
921  def computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22):
 922  """Compute the local delta RA and Dec for a pixel offset (x, y) using the local WCS.
923 
924  Parameters
925  ----------
926  x : `pandas.Series`
927  X pixel coordinate.
928  y : `pandas.Series`
929  Y pixel coordinate.
 930  cd11 : `pandas.Series`
 931  [1, 1] element of the local Wcs affine transform.
934  cd12 : `pandas.Series`
935  [1, 2] element of the local Wcs affine transform.
936  cd21 : `pandas.Series`
937  [2, 1] element of the local Wcs affine transform.
938  cd22 : `pandas.Series`
939  [2, 2] element of the local Wcs affine transform.
940 
941  Returns
942  -------
943  raDecTuple : tuple
944  RA and dec conversion of x and y given the local Wcs. Returned
945  units are in radians.
946 
947  """
948  return (x * cd11 + y * cd12, x * cd21 + y * cd22)
949 
950  def computeSkySeperation(self, ra1, dec1, ra2, dec2):
 951  """Compute the great-circle separation between two (RA, Dec) points, in radians.
952 
953  Parameters
954  ----------
955  ra1 : `pandas.Series`
956  Ra of the first coordinate in radians.
957  dec1 : `pandas.Series`
958  Dec of the first coordinate in radians.
959  ra2 : `pandas.Series`
960  Ra of the second coordinate in radians.
961  dec2 : `pandas.Series`
962  Dec of the second coordinate in radians.
963 
964  Returns
965  -------
966  dist : `pandas.Series`
967  Distance on the sphere in radians.
968  """
969  deltaDec = dec2 - dec1
970  deltaRa = ra2 - ra1
971  return 2 * np.arcsin(
972  np.sqrt(
973  np.sin(deltaDec / 2) ** 2 +
974  np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))
975 
976  def getSkySeperationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22):
 977  """Compute the distance on the sphere between pixel positions (x1, y1) and (x2, y2).
978 
979  Parameters
980  ----------
981  x1 : `pandas.Series`
982  X pixel coordinate.
983  y1 : `pandas.Series`
984  Y pixel coordinate.
985  x2 : `pandas.Series`
986  X pixel coordinate.
987  y2 : `pandas.Series`
988  Y pixel coordinate.
 989  cd11 : `pandas.Series`
 990  [1, 1] element of the local Wcs affine transform.
993  cd12 : `pandas.Series`
994  [1, 2] element of the local Wcs affine transform.
995  cd21 : `pandas.Series`
996  [2, 1] element of the local Wcs affine transform.
997  cd22 : `pandas.Series`
998  [2, 2] element of the local Wcs affine transform.
999 
1000  Returns
1001  -------
 1002  dist : `pandas.Series`
 1003  Distance on the sphere in radians.
1004  """
1005  ra1, dec1 = self.computeDeltaRaDec(x1, y1, cd11, cd12, cd21, cd22)
1006  ra2, dec2 = self.computeDeltaRaDec(x2, y2, cd11, cd12, cd21, cd22)
1007  # Great circle distance for small separations.
1008  return self.computeSkySeperation(ra1, dec1, ra2, dec2)
1009 
1010 
1011 class ComputePixelScale(LocalWcs):
 1012  """Compute the local pixel scale from the stored CDMatrix.
1013  """
1014  name = "PixelScale"
1015 
1016  @property
1017  def columns(self):
1018  return [self.colCD_1_1,
1019  self.colCD_1_2,
1020  self.colCD_2_1,
1021  self.colCD_2_2]
1022 
1023  def pixelScaleArcseconds(self, cd11, cd12, cd21, cd22):
 1024  """Compute the local pixel scale in arcseconds per pixel.
1025 
1026  Parameters
1027  ----------
 1028  cd11 : `pandas.Series`
 1029  [1, 1] element of the local Wcs affine transform in radians.
1032  cd12 : `pandas.Series`
1033  [1, 2] element of the local Wcs affine transform in radians.
1034  cd21 : `pandas.Series`
1035  [2, 1] element of the local Wcs affine transform in radians.
1036  cd22 : `pandas.Series`
1037  [2, 2] element of the local Wcs affine transform in radians.
1038 
1039  Returns
1040  -------
1041  pixScale : `pandas.Series`
 1042  Arcseconds per pixel at the location of the local WCS.
1043  """
1044  return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))
1045 
1046  def _func(self, df):
1047  return self.pixelScaleArcseconds(df[self.colCD_1_1],
1048  df[self.colCD_1_2],
1049  df[self.colCD_2_1],
1050  df[self.colCD_2_2])
1051 
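# Example (illustrative sketch): the pixel scale follows from the determinant of the
# local CD matrix (radians per pixel), i.e.
#     scale [arcsec/pixel] = 3600 * degrees(sqrt(|cd11*cd22 - cd12*cd21|))
# The CD-matrix column names below are assumptions for the sake of the example:
#
#     scale = ComputePixelScale('base_LocalWcs_CDMatrix_1_1',
#                               'base_LocalWcs_CDMatrix_1_2',
#                               'base_LocalWcs_CDMatrix_2_1',
#                               'base_LocalWcs_CDMatrix_2_2')
#     # scale(parq) -> arcseconds per pixel at each object's position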
1052 
1053 class ConvertPixelToArcseconds(ComputePixelScale):
 1054  """Convert a value from units of pixels to units of arcseconds.
1055  """
1056 
1057  def __init__(self,
1058  col,
1059  colCD_1_1,
1060  colCD_1_2,
1061  colCD_2_1,
1062  colCD_2_2,
1063  **kwargs):
1064  self.col = col
1065  super().__init__(colCD_1_1,
1066  colCD_1_2,
1067  colCD_2_1,
1068  colCD_2_2,
1069  **kwargs)
1070 
1071  @property
1072  def name(self):
1073  return f"{self.col}_asArcseconds"
1074 
1075  @property
1076  def columns(self):
1077  return [self.col,
1078  self.colCD_1_1,
1079  self.colCD_1_2,
1080  self.colCD_2_1,
1081  self.colCD_2_2]
1082 
1083  def _func(self, df):
1084  return df[self.col] * self.pixelScaleArcseconds(df[self.colCD_1_1],
1085  df[self.colCD_1_2],
1086  df[self.colCD_2_1],
1087  df[self.colCD_2_2])
1088 
1089 
1090 class ReferenceBand(Functor):
 1091  name = 'Reference Band'
1092  shortname = 'refBand'
1093 
1094  @property
1095  def columns(self):
1096  return ["merge_measurement_i",
1097  "merge_measurement_r",
1098  "merge_measurement_z",
1099  "merge_measurement_y",
1100  "merge_measurement_g"]
1101 
1102  def _func(self, df):
1103  def getFilterAliasName(row):
1104  # get column name with the max value (True > False)
1105  colName = row.idxmax()
1106  return colName.replace('merge_measurement_', '')
1107 
1108  return df[self.columns].apply(getFilterAliasName, axis=1)
1109 
1110 
1111 class Photometry(Functor):
 1112  # AB to NanoJansky (3631 Jansky)
1113  AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
1114  LOG_AB_FLUX_SCALE = 12.56
1115  FIVE_OVER_2LOG10 = 1.085736204758129569
 1116  # TO DO: DM-21955 Replace hard coded photometric calibration values
1117  COADD_ZP = 27
1118 
1119  def __init__(self, colFlux, colFluxErr=None, calib=None, **kwargs):
1120  self.vhypot = np.vectorize(self.hypot)
1121  self.col = colFlux
1122  self.colFluxErr = colFluxErr
1123 
1124  self.calib = calib
1125  if calib is not None:
1126  self.fluxMag0, self.fluxMag0Err = calib.getFluxMag0()
1127  else:
1128  self.fluxMag0 = 1./np.power(10, -0.4*self.COADD_ZP)
1129  self.fluxMag0Err = 0.
1130 
1131  super().__init__(**kwargs)
1132 
1133  @property
1134  def columns(self):
1135  return [self.col]
1136 
1137  @property
1138  def name(self):
1139  return 'mag_{0}'.format(self.col)
1140 
1141  @classmethod
1142  def hypot(cls, a, b):
1143  if np.abs(a) < np.abs(b):
1144  a, b = b, a
1145  if a == 0.:
1146  return 0.
1147  q = b/a
1148  return np.abs(a) * np.sqrt(1. + q*q)
1149 
1150  def dn2flux(self, dn, fluxMag0):
1151  return self.AB_FLUX_SCALE * dn / fluxMag0
1152 
1153  def dn2mag(self, dn, fluxMag0):
1154  with np.warnings.catch_warnings():
1155  np.warnings.filterwarnings('ignore', r'invalid value encountered')
1156  np.warnings.filterwarnings('ignore', r'divide by zero')
1157  return -2.5 * np.log10(dn/fluxMag0)
1158 
1159  def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
1160  retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
1161  retVal *= self.AB_FLUX_SCALE / fluxMag0 / fluxMag0
1162  return retVal
1163 
1164  def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
1165  retVal = self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2flux(dn, fluxMag0)
1166  return self.FIVE_OVER_2LOG10 * retVal
1167 
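# Worked example (illustrative): with the default COADD_ZP of 27,
# fluxMag0 = 10**(0.4*27) ~ 6.31e10 counts, so
#     dn2flux(dn, fluxMag0) = AB_FLUX_SCALE * dn / fluxMag0   (AB_FLUX_SCALE = 3631e9 nJy)
#     dn2mag(dn, fluxMag0)  = -2.5*log10(dn / fluxMag0)
# A single count therefore corresponds to roughly 57.5 nJy, i.e. AB magnitude 27.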
1168 
1169 class NanoJansky(Photometry):
 1170  def _func(self, df):
1171  return self.dn2flux(df[self.col], self.fluxMag0)
1172 
1173 
1174 class NanoJanskyErr(Photometry):
 1175  @property
1176  def columns(self):
1177  return [self.col, self.colFluxErr]
1178 
1179  def _func(self, df):
1180  retArr = self.dn2fluxErr(df[self.col], df[self.colFluxErr], self.fluxMag0, self.fluxMag0Err)
1181  return pd.Series(retArr, index=df.index)
1182 
1183 
1184 class Magnitude(Photometry):
 1185  def _func(self, df):
1186  return self.dn2mag(df[self.col], self.fluxMag0)
1187 
1188 
1189 class MagnitudeErr(Photometry):
 1190  @property
1191  def columns(self):
1192  return [self.col, self.colFluxErr]
1193 
1194  def _func(self, df):
1195  retArr = self.dn2MagErr(df[self.col], df[self.colFluxErr], self.fluxMag0, self.fluxMag0Err)
1196  return pd.Series(retArr, index=df.index)
1197 
1198 
1199 class LocalPhotometry(Functor):
 1200  """Base class for calibrating the specified instrument flux column using
1201  the local photometric calibration.
1202 
1203  Parameters
1204  ----------
1205  instFluxCol : `str`
1206  Name of the instrument flux column.
1207  instFluxErrCol : `str`
 1208  Name of the associated error columns for ``instFluxCol``.
1209  photoCalibCol : `str`
1210  Name of local calibration column.
1211  photoCalibErrCol : `str`
1212  Error associated with ``photoCalibCol``
1213 
1214  See also
1215  --------
1216  LocalPhotometry
1217  LocalNanojansky
1218  LocalNanojanskyErr
1219  LocalMagnitude
1220  LocalMagnitudeErr
1221  """
1222  logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1223 
1224  def __init__(self,
1225  instFluxCol,
1226  instFluxErrCol,
1227  photoCalibCol,
1228  photoCalibErrCol,
1229  **kwargs):
1230  self.instFluxCol = instFluxCol
1231  self.instFluxErrCol = instFluxErrCol
1232  self.photoCalibCol = photoCalibCol
1233  self.photoCalibErrCol = photoCalibErrCol
1234  super().__init__(**kwargs)
1235 
1236  def instFluxToNanojansky(self, instFlux, localCalib):
1237  """Convert instrument flux to nanojanskys.
1238 
1239  Parameters
1240  ----------
1241  instFlux : `numpy.ndarray` or `pandas.Series`
1242  Array of instrument flux measurements
1243  localCalib : `numpy.ndarray` or `pandas.Series`
1244  Array of local photometric calibration estimates.
1245 
1246  Returns
1247  -------
1248  calibFlux : `numpy.ndarray` or `pandas.Series`
1249  Array of calibrated flux measurements.
1250  """
1251  return instFlux * localCalib
1252 
1253  def instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr):
 1254  """Convert an instrument flux error to a nanojansky error.
1255 
1256  Parameters
1257  ----------
1258  instFlux : `numpy.ndarray` or `pandas.Series`
1259  Array of instrument flux measurements
1260  instFluxErr : `numpy.ndarray` or `pandas.Series`
1261  Errors on associated ``instFlux`` values
1262  localCalib : `numpy.ndarray` or `pandas.Series`
1263  Array of local photometric calibration estimates.
1264  localCalibErr : `numpy.ndarray` or `pandas.Series`
1265  Errors on associated ``localCalib`` values
1266 
1267  Returns
1268  -------
1269  calibFluxErr : `numpy.ndarray` or `pandas.Series`
1270  Errors on calibrated flux measurements.
1271  """
1272  return np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
1273 
1274  def instFluxToMagnitude(self, instFlux, localCalib):
 1275  """Convert an instrument flux to an AB magnitude.
1276 
1277  Parameters
1278  ----------
1279  instFlux : `numpy.ndarray` or `pandas.Series`
1280  Array of instrument flux measurements
1281  localCalib : `numpy.ndarray` or `pandas.Series`
1282  Array of local photometric calibration estimates.
1283 
1284  Returns
1285  -------
1286  calibMag : `numpy.ndarray` or `pandas.Series`
1287  Array of calibrated AB magnitudes.
1288  """
1289  return -2.5 * np.log10(self.instFluxToNanojansky(instFlux, localCalib)) + self.logNJanskyToAB
1290 
1291  def instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr):
 1292  """Convert an instrument flux error to an AB magnitude error.
1293 
1294  Parameters
1295  ----------
1296  instFlux : `numpy.ndarray` or `pandas.Series`
1297  Array of instrument flux measurements
1298  instFluxErr : `numpy.ndarray` or `pandas.Series`
1299  Errors on associated ``instFlux`` values
1300  localCalib : `numpy.ndarray` or `pandas.Series`
1301  Array of local photometric calibration estimates.
1302  localCalibErr : `numpy.ndarray` or `pandas.Series`
1303  Errors on associated ``localCalib`` values
1304 
1305  Returns
1306  -------
1307  calibMagErr: `numpy.ndarray` or `pandas.Series`
1308  Error on calibrated AB magnitudes.
1309  """
1310  err = self.instFluxErrToNanojanskyErr(instFlux, instFluxErr, localCalib, localCalibErr)
 1311  return 2.5 / np.log(10) * err / self.instFluxToNanojansky(instFlux, localCalib)
1312 
1313 
1314 class LocalNanojansky(LocalPhotometry):
 1315  """Compute calibrated fluxes using the local calibration value.
1316 
1317  See also
1318  --------
1319  LocalNanojansky
1320  LocalNanojanskyErr
1321  LocalMagnitude
1322  LocalMagnitudeErr
1323  """
1324 
1325  @property
1326  def columns(self):
1327  return [self.instFluxCol, self.photoCalibCol]
1328 
1329  @property
1330  def name(self):
1331  return f'flux_{self.instFluxCol}'
1332 
1333  def _func(self, df):
1334  return self.instFluxToNanojansky(df[self.instFluxCol], df[self.photoCalibCol])
1335 
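# Example (illustrative sketch): calibrating a PSF instrument flux with the local
# photometric calibration columns (column names are assumptions for the example):
#
#     flux = LocalNanojansky('slot_PsfFlux_instFlux', 'slot_PsfFlux_instFluxErr',
#                            'base_LocalPhotoCalib', 'base_LocalPhotoCalibErr')
#     # flux(parq) -> df['slot_PsfFlux_instFlux'] * df['base_LocalPhotoCalib'] in nJy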
1336 
1337 class LocalNanojanskyErr(LocalPhotometry):
 1338  """Compute calibrated flux errors using the local calibration value.
1339 
1340  See also
1341  --------
1342  LocalNanojansky
1343  LocalNanojanskyErr
1344  LocalMagnitude
1345  LocalMagnitudeErr
1346  """
1347 
1348  @property
1349  def columns(self):
1350  return [self.instFluxCol, self.instFluxErrCol,
1351  self.photoCalibCol, self.photoCalibErrCol]
1352 
1353  @property
1354  def name(self):
1355  return f'fluxErr_{self.instFluxCol}'
1356 
1357  def _func(self, df):
1358  return self.instFluxErrToNanojanskyErr(df[self.instFluxCol], df[self.instFluxErrCol],
1359  df[self.photoCalibCol], df[self.photoCalibErrCol])
1360 
1361 
1362 class LocalMagnitude(LocalPhotometry):
 1363  """Compute calibrated AB magnitudes using the local calibration value.
1364 
1365  See also
1366  --------
1367  LocalNanojansky
1368  LocalNanojanskyErr
1369  LocalMagnitude
1370  LocalMagnitudeErr
1371  """
1372 
1373  @property
1374  def columns(self):
1375  return [self.instFluxCol, self.photoCalibCol]
1376 
1377  @property
1378  def name(self):
1379  return f'mag_{self.instFluxCol}'
1380 
1381  def _func(self, df):
1382  return self.instFluxToMagnitude(df[self.instFluxCol],
1383  df[self.photoCalibCol])
1384 
1385 
1386 class LocalMagnitudeErr(LocalPhotometry):
 1387  """Compute calibrated AB magnitude errors using the local calibration value.
1388 
1389  See also
1390  --------
1391  LocalNanojansky
1392  LocalNanojanskyErr
1393  LocalMagnitude
1394  LocalMagnitudeErr
1395  """
1396 
1397  @property
1398  def columns(self):
1399  return [self.instFluxCol, self.instFluxErrCol,
1400  self.photoCalibCol, self.photoCalibErrCol]
1401 
1402  @property
1403  def name(self):
1404  return f'magErr_{self.instFluxCol}'
1405 
1406  def _func(self, df):
1407  return self.instFluxErrToMagnitudeErr(df[self.instFluxCol],
1408  df[self.instFluxErrCol],
1409  df[self.photoCalibCol],
1410  df[self.photoCalibErrCol])