Skip to content

Commit

Permalink
Bug where weights were cast as int.
Browse files Browse the repository at this point in the history
  • Loading branch information
kiyo-masui committed Jul 5, 2012
1 parent cd57dc7 commit ae00e3d
Show file tree
Hide file tree
Showing 10 changed files with 1,135 additions and 90 deletions.
52 changes: 31 additions & 21 deletions cal/correlate_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -152,22 +152,22 @@ def execute(self, nprocesses=1) :
norm[norm==0] = 1
gains = corr / norm
gain_dict[key] = gains
# XXX
#plt.figure()
#plt.plot(freq_dict[key][0,:], gains[0,0,0,:], '.')
##plt.plot(freq_dict[key][0,:],
## (gains[0,0,0,:] + gains[0,0,1,:])/2., '.')
##plt.plot(freq_dict[key][0,:],
## (gains[0,3,0,:] + gains[0,3,1,:])/2., '.')
#if params['diff_gain_cal_only']:
# plt.plot(freq_dict[key][0,:],
# (gains[0,0,0,:] + gains[0,0,1,:])/2., '.')
# plt.plot(freq_dict[key][0,:],
# (gains[0,3,0,:] + gains[0,3,1,:])/2., '.')
#else:
# plt.plot(freq_dict[key][0,:], gains[0,0,0,:], '.')
#plt.title(key)
#plt.xlabel('Frequency (Hz)')
#plt.ylabel('Correlation amplitude')
# XXX
out_db[key + '.gains'] = gains
out_db[key + '.freq'] = freq_dict[key]
#if not params['diff_gain_cal_only']:
# plt.show()
out_db.close()
# XXX
#plt.show()
#### Apply the calibration to the data. ####
for middle in file_middles:
key = get_key(middle)
Expand Down Expand Up @@ -201,7 +201,7 @@ def process_file(self, middle, Pipe=None):
# Allocate memory for the outputs.
corr = np.zeros((n_bands_proc, n_pol, n_cal, n_chan),
dtype=float)
norm = np.zeros(corr.shape, dtype=int)
norm = np.zeros(corr.shape, dtype=float)
freq = np.empty((n_bands_proc, n_chan))
for ii in range(n_bands_proc):
Blocks = Reader.read((), ii)
Expand Down Expand Up @@ -240,8 +240,18 @@ def process_file(self, middle, Pipe=None):
interpolation=params['interpolation'],
modes_subtract=params['smooth_modes_subtract'],
filter_type=params['filter_type'])
corr[ii,...] += this_corr
norm[ii,...] += this_norm
# Check that the answer we got is sane; if not, throw away
# this set.
tmp_corr = this_corr.copy()
tmp_norm = this_norm.copy()
tmp_corr[tmp_norm == 0] = 1.
tmp_norm[tmp_norm == 0] = 1.
tmp_gains = tmp_corr / tmp_norm
if np.all(tmp_gains < 2) and np.all(tmp_gains > 0.5):
corr[ii,...] += this_corr
norm[ii,...] += this_norm
else:
pass
if Pipe is None:
return key, corr, norm, freq
else:
Expand Down Expand Up @@ -439,7 +449,8 @@ def get_correlation(Data, maps, interpolation='nearest', modes_subtract=2,
# it. Be careful about the 0 information case (all masked).
filled_norm = norm.copy()
# No information.
bad_inds = norm == 0
bad_inds = np.logical_or(norm == 0, np.sum(un_mask, 0) <
2 * modes_subtract)
filled_norm[bad_inds] = 1
amp = corr / filled_norm
fit = submap * amp
Expand All @@ -452,17 +463,16 @@ def get_correlation(Data, maps, interpolation='nearest', modes_subtract=2,
# Store results in output arrays
correlation[ii,:,:] = corr
normalization[ii,:,:] = norm
if False and (ii == 0 or ii == 3):
if False and (ii == 0) and int(Data.field['SCAN']) == 86:
print correlation[ii,:,:]
print normalization[ii,:,:]
print chi_sq
print inv_chi_sq
print correlation[ii,:,:] / normalization[ii,:,:]
plt.plot(on_map_time, (subdata * un_mask)[:,0,0], '.')
plt.plot(on_map_time, (submap * un_mask)[:,0,0], '.')
plt.figure()
plt.plot((subdata * un_mask)[:,0,0] - (submap * un_mask)[:,0,0],
'.')
plt.plot((subdata * un_mask)[:,0,3] - (submap * un_mask)[:,0,3],
'.')
plt.plot(on_map_time, (fit * un_mask)[:,0,0], '.')
#plt.plot((subdata * un_mask)[:,0,0] - (submap * un_mask)[:,0,0],
# '.')
#plt.plot((subdata * un_mask)[:,0,3] - (submap * un_mask)[:,0,3],
# '.')
plt.show()
return correlation, normalization
60 changes: 40 additions & 20 deletions cal/drift_script.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,32 +74,52 @@ def chi_2(params, data):

initial_params = np.array([500., 100., 15., 15., 0., 0., 10., 10., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0.])
npar = len(initial_params)

for Data in Blocks:
nt = Data.data.shape[0]
for ii in [30, 100]:
this_data = Data.data[:,:,:,ii]
nf = Data.data.shape[3]
p = np.zeros((npar, nf))
print
for ii in range(10,nf):
print ii,
this_data = Data.data[:,:,:,ii].copy()
this_data = np.rollaxis(this_data, 0, 3)
this_chi_2 = lambda x: chi_2(x, this_data)
fit, err = optimize.leastsq(this_chi_2, initial_params)
p[:,ii] = fit
this_model = model(*(nt,) + tuple(fit))
print fit
plt.figure()
plt.plot(this_data[0,1,:])
plt.plot(this_data[3,1,:])
plt.plot(this_model[0,1,:])
plt.plot(this_model[3,1,:])
plt.figure()
plt.plot(this_data[1,1,:])
plt.plot(this_data[2,1,:])
plt.plot(this_model[1,1,:])
plt.plot(this_model[2,1,:])
plt.figure()
plt.plot(this_data[0,0,:] - this_data[0,1,:])
plt.plot(this_data[3,0,:] - this_data[3,1,:])
plt.plot(this_data[1,0,:] - this_data[1,1,:])
plt.plot(this_model[0,0,:] - this_model[0,1,:])
plt.plot(this_model[3,0,:] - this_model[3,1,:])
plt.plot(this_model[1,0,:] - this_model[1,1,:])
#print fit
#plt.figure()
#plt.plot(this_data[0,1,:])
#plt.plot(this_data[3,1,:])
#plt.plot(this_model[0,1,:])
#plt.plot(this_model[3,1,:])
#plt.figure()
#plt.plot(this_data[1,1,:])
#plt.plot(this_data[2,1,:])
#plt.plot(this_model[1,1,:])
#plt.plot(this_model[2,1,:])
#plt.figure()
#plt.plot(this_data[0,0,:] - this_data[0,1,:])
#plt.plot(this_data[3,0,:] - this_data[3,1,:])
#plt.plot(this_data[1,0,:] - this_data[1,1,:])
#plt.plot(this_model[0,0,:] - this_model[0,1,:])
#plt.plot(this_model[3,0,:] - this_model[3,1,:])
#plt.plot(this_model[1,0,:] - this_model[1,1,:])
gains = np.sqrt(p[2,:] * p[3,:]).real
gains[gains < 0.6 * np.mean(gains)] = np.mean(gains) * 1000000
plt.figure()
plt.plot(gains)
plt.plot(p[2,:])
plt.plot(p[3,:])
U_data = Data.data[:,1,1,:].copy().filled(0) / gains
plt.figure()
plt.imshow(U_data)
plt.colorbar()
V_data = Data.data[:,2,1,:].copy().filled(0) / gains
plt.figure()
plt.imshow(V_data)
plt.colorbar()


75 changes: 73 additions & 2 deletions foreground/ts_measure.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,14 +25,16 @@
"input_end" : ".fits",
"output_root" : "./",
"output_filename" : "foreground_modes.shelve",
"n_modes_removed" : 0,
"output_end" : '.fits',
"scans" : (),
"IFs" : ()
}

prefix = 'tf_'

class Measure(object) :
"""Measures the noise of data files.
"""Measures foregrounds in the data files and cleans them.
"""

def __init__(self, parameter_file_or_dict=None, feedback=2) :
Expand Down Expand Up @@ -98,8 +100,51 @@ def execute(self, nprocesses=1) :
#plt.show()
out_db[key + '.vects'] = eigen_vects
out_db[key + '.freq'] = freq_dict[key]
if params['n_modes_removed']:
for middle in file_middles:
key = get_key(middle)
modes = out_db[key + '.vects']
modes = modes[:,:,:,:,-params['n_modes_removed']:]
self.clean_file(middle, modes)
# Close the data base.
out_db.close()

def clean_file(self, middle, modes):
    """Read one data file, subtract the given foreground modes, write it out.

    Parameters
    ----------
    middle : string
        Middle part of the file name; combined with the input/output
        root and end parameters to form the input and output paths.
    modes : array
        Modes to remove, with a leading band axis; `modes[ii, ...]` is
        passed to `clean_data` for every data block of band `ii`.
    """
    params = self.params
    file_name = (params['input_root'] + middle
                 + params['input_end'])
    # Output parameters.
    Writer = core.fitsGBT.Writer(feedback=self.feedback)
    out_filename = (params['output_root'] + middle
                    + params['output_end'])
    Reader = core.fitsGBT.Reader(file_name, feedback=self.feedback)
    n_bands = len(Reader.IF_set)
    band_inds = params["IFs"]
    if not band_inds:
        band_inds = range(n_bands)
    # Number of bands we actually process.
    n_bands_proc = len(band_inds)
    for ii in range(n_bands_proc):
        # NOTE(review): when params["IFs"] selects a proper subset of the
        # bands, this still reads bands 0..n_bands_proc-1 rather than the
        # selected indices — confirm against Reader.read semantics.
        Blocks = Reader.read((), ii)
        # Modes were measured per band; pick out this band's slice.
        this_band_modes = modes[ii, ...]
        for Data in Blocks:
            clean_data(Data, this_band_modes)
            Writer.add_data(Data)
    # Write the cleaned data back out.
    utils.mkparents(out_filename)
    Writer.write(out_filename)

def process_file(self, middle):
params = self.params
Expand Down Expand Up @@ -144,9 +189,35 @@ def get_key(middle):
# For now just use the session number as the key.
separate = middle.split('/')[-1]
sess_num = separate.split('_')[0]
key = sess_num
# XXX
#key = sess_num
key = middle
return key

def clean_data(Data, modes):
    """Subtract the given foreground modes from a data block in place.

    `Data.data` is a masked array with axes (time, pol, cal, freq);
    `modes` has axes (pol, cal, freq, mode).  Each mode's amplitude is
    fit per time sample (counting only unmasked channels) and the
    projection is removed from `Data.data`.
    """
    # Work on an unmasked copy; masked samples get zero weight.
    filled = Data.data.filled(0)
    good = np.logical_not(ma.getmaskarray(Data.data))
    # Remove the time mean, counting only unmasked samples, then
    # re-zero the masked entries.
    n_good = np.sum(good, 0)
    n_good[n_good == 0] = 1
    centred = (filled - np.sum(filled, 0) / n_good) * good
    # Normalization at each time, pol, cal, mode: sum over frequency of
    # the squared mode restricted to unmasked channels.  Usually 1
    # unless there is a masked element.
    norms = np.sum(good[..., None] * modes[None, ...] ** 2, -2)
    norms[norms == 0] = 1.
    # Mode amplitudes: project the centred data onto each mode
    # (summing over frequency).
    amps = np.sum(centred[..., None] * modes[None, ...], -2) / norms
    # Expand back along the frequency axis (collapsing the mode axis)
    # and subtract from the data in place.
    Data.data -= np.sum(amps[:, :, :, None, :] * modes[None, ...], -1)

def get_covar(Data):

data = Data.data.filled(0)
Expand Down
Loading

0 comments on commit ae00e3d

Please sign in to comment.